{
"results": {
"assin2_rte": {
"f1_macro,all": 0.3333333333333333,
"acc,all": 0.5,
"alias": "assin2_rte"
},
"assin2_sts": {
"pearson,all": 0.023587414997509645,
"mse,all": 2.6942320261437915,
"alias": "assin2_sts"
},
"bluex": {
"acc,all": 0.20166898470097358,
"acc,exam_id__USP_2019": 0.225,
"acc,exam_id__UNICAMP_2018": 0.25925925925925924,
"acc,exam_id__UNICAMP_2020": 0.23636363636363636,
"acc,exam_id__UNICAMP_2023": 0.23255813953488372,
"acc,exam_id__UNICAMP_2024": 0.17777777777777778,
"acc,exam_id__UNICAMP_2019": 0.28,
"acc,exam_id__UNICAMP_2021_2": 0.27450980392156865,
"acc,exam_id__USP_2022": 0.061224489795918366,
"acc,exam_id__USP_2020": 0.17857142857142858,
"acc,exam_id__USP_2018": 0.16666666666666666,
"acc,exam_id__USP_2021": 0.1346153846153846,
"acc,exam_id__USP_2023": 0.13636363636363635,
"acc,exam_id__USP_2024": 0.2682926829268293,
"acc,exam_id__UNICAMP_2021_1": 0.17391304347826086,
"acc,exam_id__UNICAMP_2022": 0.23076923076923078,
"alias": "bluex"
},
"enem_challenge": {
"alias": "enem",
"acc,all": 0.18124562631210636,
"acc,exam_id__2016_2": 0.1951219512195122,
"acc,exam_id__2016": 0.1652892561983471,
"acc,exam_id__2010": 0.2222222222222222,
"acc,exam_id__2014": 0.14678899082568808,
"acc,exam_id__2023": 0.14074074074074075,
"acc,exam_id__2017": 0.1810344827586207,
"acc,exam_id__2015": 0.16806722689075632,
"acc,exam_id__2012": 0.23275862068965517,
"acc,exam_id__2022": 0.17293233082706766,
"acc,exam_id__2013": 0.1574074074074074,
"acc,exam_id__2009": 0.19130434782608696,
"acc,exam_id__2011": 0.20512820512820512
},
"faquad_nli": {
"f1_macro,all": 0.12578616352201258,
"acc,all": 0.2,
"alias": "faquad_nli"
},
"oab_exams": {
"acc,all": 0.23006833712984054,
"acc,exam_id__2014-13": 0.2375,
"acc,exam_id__2012-07": 0.1375,
"acc,exam_id__2012-09": 0.23376623376623376,
"acc,exam_id__2011-03": 0.24242424242424243,
"acc,exam_id__2012-08": 0.225,
"acc,exam_id__2015-17": 0.24358974358974358,
"acc,exam_id__2014-15": 0.21794871794871795,
"acc,exam_id__2017-24": 0.225,
"acc,exam_id__2015-16": 0.2375,
"acc,exam_id__2017-23": 0.2125,
"acc,exam_id__2011-04": 0.25,
"acc,exam_id__2010-02": 0.24,
"acc,exam_id__2016-19": 0.19230769230769232,
"acc,exam_id__2012-06": 0.2375,
"acc,exam_id__2012-06a": 0.2375,
"acc,exam_id__2013-12": 0.175,
"acc,exam_id__2017-22": 0.25,
"acc,exam_id__2010-01": 0.25882352941176473,
"acc,exam_id__2013-10": 0.2125,
"acc,exam_id__2014-14": 0.2625,
"acc,exam_id__2018-25": 0.2875,
"acc,exam_id__2016-20a": 0.3,
"acc,exam_id__2015-18": 0.25,
"acc,exam_id__2011-05": 0.2375,
"acc,exam_id__2013-11": 0.1625,
"acc,exam_id__2016-20": 0.225,
"acc,exam_id__2016-21": 0.2125,
"alias": "oab_exams"
},
"sparrow_emotion-2021-cortiz-por": {
"alias": "emotion-2021-cortiz-por",
"f1_macro,all": 0.0,
"acc,all": 0.0
},
"sparrow_hate-2019-fortuna-por": {
"alias": "hate-2019-fortuna-por",
"f1_macro,all": 0.47986684591255363,
"acc,all": 0.48
},
"sparrow_sentiment-2016-mozetic-por": {
"alias": "sentiment-2016-mozetic-por",
"f1_macro,all": 0.08695652173913043,
"acc,all": 0.15
},
"sparrow_sentiment-2018-brum-por": {
"alias": "sentiment-2018-brum-por",
"f1_macro,all": 0.19385342789598106,
"acc,all": 0.41
}
},
"configs": {
"assin2_rte": {
"task": "assin2_rte",
"group": [
"pt_benchmark",
"assin2"
],
"dataset_path": "assin2",
"test_split": "test",
"fewshot_split": "train",
"doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa?\nResposta:",
"doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
"description": "Abaixo contém pares de premissa e hipótese, para cada par você deve julgar se a hipótese pode ser inferida a partir da premissa, responda apenas com Sim ou Não.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "id_sampler",
"sampler_config": {
"id_list": [
1,
3251,
2,
3252,
3,
4,
5,
6,
3253,
7,
3254,
3255,
3256,
8,
9,
10,
3257,
11,
3258,
12,
13,
14,
15,
3259,
3260,
3261,
3262,
3263,
16,
17,
3264,
18,
3265,
3266,
3267,
19,
20,
3268,
3269,
21,
3270,
3271,
22,
3272,
3273,
23,
3274,
24,
25,
3275
],
"id_column": "sentence_pair_id"
}
},
"num_fewshot": 15,
"metric_list": [
{
"metric": "f1_macro",
"aggregation": "f1_macro",
"higher_is_better": true
},
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "find_similar_label",
"labels": [
"Sim",
"Não"
]
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"assin2_sts": {
"task": "assin2_sts",
"group": [
"pt_benchmark",
"assin2"
],
"dataset_path": "assin2",
"test_split": "test",
"fewshot_split": "train",
"doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Qual o grau de similaridade entre as duas frases de 1,0 a 5,0?\nResposta:",
"doc_to_target": "<function assin2_float_to_pt_str at 0x7f8bf0f71800>",
"description": "Abaixo contém pares de frases, para cada par você deve julgar o grau de similaridade de 1,0 a 5,0, responda apenas com o número.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "id_sampler",
"sampler_config": {
"id_list": [
1,
3251,
2,
3252,
3,
4,
5,
6,
3253,
7,
3254,
3255,
3256,
8,
9,
10,
3257,
11,
3258,
12,
13,
14,
15,
3259,
3260,
3261,
3262,
3263,
16,
17,
3264,
18,
3265,
3266,
3267,
19,
20,
3268,
3269,
21,
3270,
3271,
22,
3272,
3273,
23,
3274,
24,
25,
3275
],
"id_column": "sentence_pair_id"
}
},
"num_fewshot": 15,
"metric_list": [
{
"metric": "pearson",
"aggregation": "pearsonr",
"higher_is_better": true
},
{
"metric": "mse",
"aggregation": "mean_squared_error",
"higher_is_better": false
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "number_filter",
"type": "float",
"range_min": 1.0,
"range_max": 5.0,
"on_outside_range": "clip",
"fallback": 5.0
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"bluex": {
"task": "bluex",
"group": [
"pt_benchmark",
"vestibular"
],
"dataset_path": "eduagarcia-temp/BLUEX_without_images",
"test_split": "train",
"fewshot_split": "train",
"doc_to_text": "<function enem_doc_to_text at 0x7f8bf0f711c0>",
"doc_to_target": "{{answerKey}}",
"description": "As perguntas a seguir são questões de multipla escolha de provas de vestibular de Universidades Brasileiras, reponda apenas com as letras A, B, C, D ou E.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "id_sampler",
"sampler_config": {
"id_list": [
"USP_2018_3",
"UNICAMP_2018_2",
"USP_2018_35",
"UNICAMP_2018_16",
"USP_2018_89"
],
"id_column": "id",
"exclude_from_task": true
}
},
"num_fewshot": 3,
"metric_list": [
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "normalize_spaces"
},
{
"function": "remove_accents"
},
{
"function": "find_choices",
"choices": [
"A",
"B",
"C",
"D",
"E"
],
"regex_patterns": [
"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta[Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
"\\b([ABCDE])\\.",
"\\b([ABCDE]) ?[.):-]",
"\\b([ABCDE])$",
"\\b([ABCDE])\\b"
]
},
{
"function": "take_first"
}
],
"group_by": {
"column": "exam_id"
}
}
],
"should_decontaminate": true,
"doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f8bf0f71440>",
"metadata": {
"version": 1.0
}
},
"enem_challenge": {
"task": "enem_challenge",
"task_alias": "enem",
"group": [
"pt_benchmark",
"vestibular"
],
"dataset_path": "eduagarcia/enem_challenge",
"test_split": "train",
"fewshot_split": "train",
"doc_to_text": "<function enem_doc_to_text at 0x7f8bf0f719e0>",
"doc_to_target": "{{answerKey}}",
"description": "As perguntas a seguir são questões de multipla escolha do Exame Nacional do Ensino Médio (ENEM), reponda apenas com as letras A, B, C, D ou E.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "id_sampler",
"sampler_config": {
"id_list": [
"2022_21",
"2022_88",
"2022_143"
],
"id_column": "id",
"exclude_from_task": true
}
},
"num_fewshot": 3,
"metric_list": [
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "normalize_spaces"
},
{
"function": "remove_accents"
},
{
"function": "find_choices",
"choices": [
"A",
"B",
"C",
"D",
"E"
],
"regex_patterns": [
"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta[Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
"\\b([ABCDE])\\.",
"\\b([ABCDE]) ?[.):-]",
"\\b([ABCDE])$",
"\\b([ABCDE])\\b"
]
},
{
"function": "take_first"
}
],
"group_by": {
"column": "exam_id"
}
}
],
"should_decontaminate": true,
"doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f8bf0f71c60>",
"metadata": {
"version": 1.0
}
},
"faquad_nli": {
"task": "faquad_nli",
"group": [
"pt_benchmark"
],
"dataset_path": "ruanchaves/faquad-nli",
"test_split": "test",
"fewshot_split": "train",
"doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta satisfaz a pergunta? Sim ou Não?",
"doc_to_target": "{{['Não', 'Sim'][label]}}",
"description": "Abaixo contém pares de pergunta e reposta, para cada par você deve julgar resposta responde a pergunta de maneira satisfatória e aparenta estar correta, escreva apenas Sim ou Não.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n",
"sampler_config": {
"fewshot_indices": [
1893,
949,
663,
105,
1169,
2910,
2227,
2813,
974,
558,
1503,
1958,
2918,
601,
1560,
984,
2388,
995,
2233,
1982,
165,
2788,
1312,
2285,
522,
1113,
1670,
323,
236,
1263,
1562,
2519,
1049,
432,
1167,
1394,
2022,
2551,
2194,
2187,
2282,
2816,
108,
301,
1185,
1315,
1420,
2436,
2322,
766
]
}
},
"num_fewshot": 15,
"metric_list": [
{
"metric": "f1_macro",
"aggregation": "f1_macro",
"higher_is_better": true
},
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "find_similar_label",
"labels": [
"Sim",
"Não"
]
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"oab_exams": {
"task": "oab_exams",
"group": [
"legal_benchmark",
"pt_benchmark"
],
"dataset_path": "eduagarcia/oab_exams",
"test_split": "train",
"fewshot_split": "train",
"doc_to_text": "<function doc_to_text at 0x7f8bf0f70b80>",
"doc_to_target": "{{answerKey}}",
"description": "As perguntas a seguir são questões de multipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), reponda apenas com as letras A, B, C ou D.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "id_sampler",
"sampler_config": {
"id_list": [
"2010-01_1",
"2010-01_11",
"2010-01_13",
"2010-01_23",
"2010-01_26",
"2010-01_28",
"2010-01_38",
"2010-01_48",
"2010-01_58",
"2010-01_68",
"2010-01_76",
"2010-01_83",
"2010-01_85",
"2010-01_91",
"2010-01_99"
],
"id_column": "id",
"exclude_from_task": true
}
},
"num_fewshot": 3,
"metric_list": [
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "normalize_spaces"
},
{
"function": "remove_accents"
},
{
"function": "find_choices",
"choices": [
"A",
"B",
"C",
"D"
],
"regex_patterns": [
"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta[Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
"\\b([ABCD])\\)",
"\\b([ABCD]) ?[.):-]",
"\\b([ABCD])$",
"\\b([ABCD])\\b"
]
},
{
"function": "take_first"
}
],
"group_by": {
"column": "exam_id"
}
}
],
"should_decontaminate": true,
"doc_to_decontamination_query": "<function doc_to_text at 0x7f8bf0f70e00>",
"metadata": {
"version": 1.4
}
},
"sparrow_emotion-2021-cortiz-por": {
"task": "sparrow_emotion-2021-cortiz-por",
"task_alias": "emotion-2021-cortiz-por",
"group": [
"pt_benchmark",
"sparrow"
],
"dataset_path": "UBC-NLP/sparrow",
"dataset_name": "emotion-2021-cortiz-por",
"test_split": "validation",
"fewshot_split": "train",
"doc_to_text": "Texto: {{content}}\nPergunta: Qual a principal emoção apresentada no texto?\nResposta:",
"doc_to_target": "<function sparrow_emotion_por_trans_label at 0x7f8bf0f71080>",
"description": "Abaixo contém o conteúdo de tweets de usuarios do Twitter em português, sua tarefa é extrair qual a principal emoção dos textos. Responda com apenas uma das seguintes opções:\n Admiração, Diversão, Raiva, Aborrecimento, Aprovação, Compaixão, Confusão, Curiosidade, Desejo, Decepção, Desaprovação, Nojo, Vergonha, Inveja, Entusiasmo, Medo, Gratidão, Luto, Alegria, Saudade, Amor, Nervosismo, Otimismo, Orgulho, Alívio, Remorso, Tristeza ou Surpresa.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 25,
"metric_list": [
{
"metric": "f1_macro",
"aggregation": "f1_macro",
"higher_is_better": true
},
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "find_similar_label",
"labels": [
"Admiração",
"Diversão",
"Raiva",
"Aborrecimento",
"Aprovação",
"Compaixão",
"Confusão",
"Curiosidade",
"Desejo",
"Decepção",
"Desaprovação",
"Nojo",
"Vergonha",
"Inveja",
"Entusiasmo",
"Medo",
"Gratidão",
"Luto",
"Alegria",
"Saudade",
"Amor",
"Nervosismo",
"Otimismo",
"Orgulho",
"Alívio",
"Remorso",
"Tristeza",
"Surpresa"
]
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"limit": 500,
"metadata": {
"version": 1.0
}
},
"sparrow_hate-2019-fortuna-por": {
"task": "sparrow_hate-2019-fortuna-por",
"task_alias": "hate-2019-fortuna-por",
"group": [
"pt_benchmark",
"sparrow"
],
"dataset_path": "UBC-NLP/sparrow",
"dataset_name": "hate-2019-fortuna-por",
"test_split": "validation",
"fewshot_split": "train",
"doc_to_text": "Texto: {{content}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
"doc_to_target": "{{'Sim' if label == 'Hate' else 'Não'}}",
"description": "Abaixo contém o conteúdo de tweets de usuarios do Twitter em português, sua tarefa é classificar se o texto contem discurso de ódio our não. Responda apenas com Sim ou Não.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 25,
"metric_list": [
{
"metric": "f1_macro",
"aggregation": "f1_macro",
"higher_is_better": true
},
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "find_similar_label",
"labels": [
"Sim",
"Não"
]
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"limit": 500,
"metadata": {
"version": 1.0
}
},
"sparrow_sentiment-2016-mozetic-por": {
"task": "sparrow_sentiment-2016-mozetic-por",
"task_alias": "sentiment-2016-mozetic-por",
"group": [
"pt_benchmark",
"sparrow"
],
"dataset_path": "UBC-NLP/sparrow",
"dataset_name": "sentiment-2016-mozetic-por",
"test_split": "validation",
"fewshot_split": "train",
"doc_to_text": "Texto: {{content}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
"doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
"description": "Abaixo contém o conteúdo de tweets de usuarios do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 25,
"metric_list": [
{
"metric": "f1_macro",
"aggregation": "f1_macro",
"higher_is_better": true
},
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "find_similar_label",
"labels": [
"Positivo",
"Neutro",
"Negativo"
]
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"limit": 500,
"metadata": {
"version": 1.0
}
},
"sparrow_sentiment-2018-brum-por": {
"task": "sparrow_sentiment-2018-brum-por",
"task_alias": "sentiment-2018-brum-por",
"group": [
"pt_benchmark",
"sparrow"
],
"dataset_path": "UBC-NLP/sparrow",
"dataset_name": "sentiment-2018-brum-por",
"test_split": "validation",
"fewshot_split": "train",
"doc_to_text": "Texto: {{content}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
"doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
"description": "Abaixo contém o conteúdo de tweets de usuarios do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 25,
"metric_list": [
{
"metric": "f1_macro",
"aggregation": "f1_macro",
"higher_is_better": true
},
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "find_similar_label",
"labels": [
"Positivo",
"Neutro",
"Negativo"
]
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"limit": 500,
"metadata": {
"version": 1.0
}
}
},
"versions": {
"assin2_rte": 1.0,
"assin2_sts": 1.0,
"bluex": 1.0,
"enem_challenge": 1.0,
"faquad_nli": 1.0,
"oab_exams": 1.4,
"sparrow_emotion-2021-cortiz-por": 1.0,
"sparrow_hate-2019-fortuna-por": 1.0,
"sparrow_sentiment-2016-mozetic-por": 1.0,
"sparrow_sentiment-2018-brum-por": 1.0
},
"n-shot": {
"assin2_rte": 15,
"assin2_sts": 15,
"bluex": 3,
"enem_challenge": 3,
"faquad_nli": 15,
"oab_exams": 3,
"sparrow_emotion-2021-cortiz-por": 25,
"sparrow_hate-2019-fortuna-por": 25,
"sparrow_sentiment-2016-mozetic-por": 25,
"sparrow_sentiment-2018-brum-por": 25
},
"model_meta": {
"truncated": 7168,
"non_truncated": 4721,
"padded": 0,
"non_padded": 11889,
"fewshots_truncated": 20319,
"has_chat_template": false,
"chat_type": null,
"n_gpus": 1,
"accelerate_num_process": null,
"model_sha": "3e8a7225792db123fcba1062204bcf318bf128b5",
"model_dtype": "torch.float16",
"model_memory_footprint": 261468696,
"model_num_parameters": 124442880,
"model_is_loaded_in_4bit": false,
"model_is_loaded_in_8bit": false,
"model_is_quantized": null,
"model_device": "cuda:2",
"batch_size": 128,
"max_length": 1024,
"max_ctx_length": 992,
"max_gen_toks": 32
},
"task_model_meta": {
"assin2_rte": {
"sample_size": 2448,
"truncated": 0,
"non_truncated": 2448,
"padded": 0,
"non_padded": 2448,
"fewshots_truncated": 0,
"mean_seq_length": 859.2075163398692,
"min_seq_length": 845,
"max_seq_length": 897,
"max_ctx_length": 992,
"max_gen_toks": 32,
"mean_original_fewshots_size": 15.0,
"mean_effective_fewshot_size": 15.0
},
"assin2_sts": {
"sample_size": 2448,
"truncated": 2448,
"non_truncated": 0,
"padded": 0,
"non_padded": 2448,
"fewshots_truncated": 3348,
"mean_seq_length": 1063.2075163398692,
"min_seq_length": 1049,
"max_seq_length": 1101,
"max_ctx_length": 992,
"max_gen_toks": 32,
"mean_original_fewshots_size": 15.0,
"mean_effective_fewshot_size": 13.632352941176471
},
"bluex": {
"sample_size": 719,
"truncated": 677,
"non_truncated": 42,
"padded": 0,
"non_padded": 719,
"fewshots_truncated": 761,
"mean_seq_length": 1230.6369958275382,
"min_seq_length": 950,
"max_seq_length": 1921,
"max_ctx_length": 992,
"max_gen_toks": 32,
"mean_original_fewshots_size": 3.0,
"mean_effective_fewshot_size": 1.9415855354659248
},
"enem_challenge": {
"sample_size": 1429,
"truncated": 1279,
"non_truncated": 150,
"padded": 0,
"non_padded": 1429,
"fewshots_truncated": 1456,
"mean_seq_length": 1107.7627711686494,
"min_seq_length": 930,
"max_seq_length": 2392,
"max_ctx_length": 992,
"max_gen_toks": 32,
"mean_original_fewshots_size": 3.0,
"mean_effective_fewshot_size": 1.98110566829951
},
"faquad_nli": {
"sample_size": 650,
"truncated": 557,
"non_truncated": 93,
"padded": 0,
"non_padded": 650,
"fewshots_truncated": 571,
"mean_seq_length": 1008.72,
"min_seq_length": 975,
"max_seq_length": 1073,
"max_ctx_length": 992,
"max_gen_toks": 32,
"mean_original_fewshots_size": 15.0,
"mean_effective_fewshot_size": 14.121538461538462
},
"oab_exams": {
"sample_size": 2195,
"truncated": 207,
"non_truncated": 1988,
"padded": 0,
"non_padded": 2195,
"fewshots_truncated": 207,
"mean_seq_length": 887.9448747152619,
"min_seq_length": 718,
"max_seq_length": 1161,
"max_ctx_length": 992,
"max_gen_toks": 32,
"mean_original_fewshots_size": 3.0,
"mean_effective_fewshot_size": 2.9056947608200456
},
"sparrow_emotion-2021-cortiz-por": {
"sample_size": 500,
"truncated": 500,
"non_truncated": 0,
"padded": 0,
"non_padded": 500,
"fewshots_truncated": 4502,
"mean_seq_length": 1403.554,
"min_seq_length": 1386,
"max_seq_length": 1438,
"max_ctx_length": 992,
"max_gen_toks": 32,
"mean_original_fewshots_size": 25.0,
"mean_effective_fewshot_size": 15.996
},
"sparrow_hate-2019-fortuna-por": {
"sample_size": 500,
"truncated": 500,
"non_truncated": 0,
"padded": 0,
"non_padded": 500,
"fewshots_truncated": 3442,
"mean_seq_length": 1331.124,
"min_seq_length": 1312,
"max_seq_length": 1392,
"max_ctx_length": 992,
"max_gen_toks": 32,
"mean_original_fewshots_size": 25.0,
"mean_effective_fewshot_size": 18.116
},
"sparrow_sentiment-2016-mozetic-por": {
"sample_size": 500,
"truncated": 500,
"non_truncated": 0,
"padded": 0,
"non_padded": 500,
"fewshots_truncated": 2529,
"mean_seq_length": 1183.39,
"min_seq_length": 1170,
"max_seq_length": 1218,
"max_ctx_length": 992,
"max_gen_toks": 32,
"mean_original_fewshots_size": 25.0,
"mean_effective_fewshot_size": 19.942
},
"sparrow_sentiment-2018-brum-por": {
"sample_size": 500,
"truncated": 500,
"non_truncated": 0,
"padded": 0,
"non_padded": 500,
"fewshots_truncated": 3503,
"mean_seq_length": 1302.422,
"min_seq_length": 1289,
"max_seq_length": 1335,
"max_ctx_length": 992,
"max_gen_toks": 32,
"mean_original_fewshots_size": 25.0,
"mean_effective_fewshot_size": 17.994
}
},
"config": {
"model": "huggingface",
"model_args": "pretrained=nicholasKluge/Aira-2-portuguese-124M,dtype=float16,device=cuda:2,revision=main,trust_remote_code=True,starting_max_length=4096",
"batch_size": "auto",
"batch_sizes": [],
"device": null,
"use_cache": null,
"limit": [
null,
null,
null,
null,
null,
null,
500.0,
500.0,
500.0,
500.0
],
"bootstrap_iters": 0,
"gen_kwargs": null
},
"git_hash": "15f86b5"
}