Tucano-160m / results-pt.json
{
"results": {
"assin2_rte": {
"f1_macro,all": 0.33514630774633175,
"f1_macro_stderr,all": 0.003299397788990543,
"acc,all": 0.5008169934640523,
"acc_stderr,all": 0.007138592229578871,
"alias": "assin2_rte"
},
"assin2_sts": {
"pearson,all": 0.11020343313758801,
"pearson_stderr,all": 0.013365766386907344,
"mse,all": 2.295420751633987,
"mse_stderr,all": "N/A",
"alias": "assin2_sts"
},
"bluex": {
"acc,all": 0.2211404728789986,
"acc_stderr,all": 0.008945969963373294,
"acc,exam_id__UNICAMP_2019": 0.22,
"acc_stderr,exam_id__UNICAMP_2019": 0.033861199085010514,
"acc,exam_id__USP_2022": 0.16326530612244897,
"acc_stderr,exam_id__USP_2022": 0.030400760401929627,
"acc,exam_id__USP_2019": 0.175,
"acc_stderr,exam_id__USP_2019": 0.03458858188173037,
"acc,exam_id__USP_2023": 0.13636363636363635,
"acc_stderr,exam_id__USP_2023": 0.029885038220828584,
"acc,exam_id__USP_2018": 0.16666666666666666,
"acc_stderr,exam_id__USP_2018": 0.02934557492532157,
"acc,exam_id__UNICAMP_2021_2": 0.27450980392156865,
"acc_stderr,exam_id__UNICAMP_2021_2": 0.03613483407763362,
"acc,exam_id__UNICAMP_2024": 0.15555555555555556,
"acc_stderr,exam_id__UNICAMP_2024": 0.031212769243905952,
"acc,exam_id__UNICAMP_2018": 0.2777777777777778,
"acc_stderr,exam_id__UNICAMP_2018": 0.035300438822571445,
"acc,exam_id__UNICAMP_2020": 0.34545454545454546,
"acc_stderr,exam_id__UNICAMP_2020": 0.037131057838730726,
"acc,exam_id__USP_2020": 0.21428571428571427,
"acc_stderr,exam_id__USP_2020": 0.03166215613294707,
"acc,exam_id__UNICAMP_2022": 0.3333333333333333,
"acc_stderr,exam_id__UNICAMP_2022": 0.04362901851162611,
"acc,exam_id__UNICAMP_2023": 0.27906976744186046,
"acc_stderr,exam_id__UNICAMP_2023": 0.039313587729350605,
"acc,exam_id__USP_2024": 0.0975609756097561,
"acc_stderr,exam_id__USP_2024": 0.02675768830365912,
"acc,exam_id__USP_2021": 0.21153846153846154,
"acc_stderr,exam_id__USP_2021": 0.03267850559802995,
"acc,exam_id__UNICAMP_2021_1": 0.2391304347826087,
"acc_stderr,exam_id__UNICAMP_2021_1": 0.03629929450319059,
"alias": "bluex"
},
"enem_challenge": {
"alias": "enem",
"acc,all": 0.19034289713086075,
"acc_stderr,all": 0.006011823590355681,
"acc,exam_id__2017": 0.19827586206896552,
"acc_stderr,exam_id__2017": 0.021279203502942167,
"acc,exam_id__2016": 0.21487603305785125,
"acc_stderr,exam_id__2016": 0.02158314638462637,
"acc,exam_id__2010": 0.23931623931623933,
"acc_stderr,exam_id__2010": 0.0227094402090534,
"acc,exam_id__2022": 0.16541353383458646,
"acc_stderr,exam_id__2022": 0.018562339546846135,
"acc,exam_id__2013": 0.2037037037037037,
"acc_stderr,exam_id__2013": 0.022307626979451566,
"acc,exam_id__2011": 0.17094017094017094,
"acc_stderr,exam_id__2011": 0.0201452444975699,
"acc,exam_id__2023": 0.14814814814814814,
"acc_stderr,exam_id__2023": 0.017671652075145466,
"acc,exam_id__2012": 0.20689655172413793,
"acc_stderr,exam_id__2012": 0.021747470078649574,
"acc,exam_id__2009": 0.20869565217391303,
"acc_stderr,exam_id__2009": 0.021867814507714273,
"acc,exam_id__2014": 0.1651376146788991,
"acc_stderr,exam_id__2014": 0.02058622627036804,
"acc,exam_id__2016_2": 0.18699186991869918,
"acc_stderr,exam_id__2016_2": 0.020307137228639454,
"acc,exam_id__2015": 0.18487394957983194,
"acc_stderr,exam_id__2015": 0.020439501421247595
},
"faquad_nli": {
"f1_macro,all": 0.4396551724137931,
"f1_macro_stderr,all": 0.0035796984729087084,
"acc,all": 0.7846153846153846,
"acc_stderr,all": 0.011396120309131327,
"alias": "faquad_nli"
},
"hatebr_offensive": {
"alias": "hatebr_offensive_binary",
"f1_macro,all": 0.3655600300176512,
"f1_macro_stderr,all": 0.006489736199058569,
"acc,all": 0.51,
"acc_stderr,all": 0.009476909264591516
},
"oab_exams": {
"acc,all": 0.2510250569476082,
"acc_stderr,all": 0.005332808146402586,
"acc,exam_id__2012-06a": 0.3,
"acc_stderr,exam_id__2012-06a": 0.02953634628390834,
"acc,exam_id__2015-18": 0.2625,
"acc_stderr,exam_id__2015-18": 0.02840788991313081,
"acc,exam_id__2011-04": 0.2,
"acc_stderr,exam_id__2011-04": 0.0258243013869545,
"acc,exam_id__2014-13": 0.325,
"acc_stderr,exam_id__2014-13": 0.030219241074677116,
"acc,exam_id__2012-07": 0.175,
"acc_stderr,exam_id__2012-07": 0.024534390351329243,
"acc,exam_id__2016-21": 0.225,
"acc_stderr,exam_id__2016-21": 0.026867276003029624,
"acc,exam_id__2013-12": 0.3125,
"acc_stderr,exam_id__2013-12": 0.02981201784946401,
"acc,exam_id__2014-14": 0.1875,
"acc_stderr,exam_id__2014-14": 0.025145623094832682,
"acc,exam_id__2016-20a": 0.25,
"acc_stderr,exam_id__2016-20a": 0.027915563923627408,
"acc,exam_id__2016-19": 0.28205128205128205,
"acc_stderr,exam_id__2016-19": 0.029391061589889293,
"acc,exam_id__2010-01": 0.21176470588235294,
"acc_stderr,exam_id__2010-01": 0.025669942737076176,
"acc,exam_id__2013-11": 0.2125,
"acc_stderr,exam_id__2013-11": 0.02642535067855396,
"acc,exam_id__2011-05": 0.3,
"acc_stderr,exam_id__2011-05": 0.02960085431070108,
"acc,exam_id__2017-24": 0.2375,
"acc_stderr,exam_id__2017-24": 0.027553516167586348,
"acc,exam_id__2013-10": 0.2875,
"acc_stderr,exam_id__2013-10": 0.02915685783638132,
"acc,exam_id__2015-16": 0.2625,
"acc_stderr,exam_id__2015-16": 0.02843386057218557,
"acc,exam_id__2016-20": 0.3,
"acc_stderr,exam_id__2016-20": 0.029606670976661316,
"acc,exam_id__2015-17": 0.2564102564102564,
"acc_stderr,exam_id__2015-17": 0.028521229460733034,
"acc,exam_id__2012-09": 0.2727272727272727,
"acc_stderr,exam_id__2012-09": 0.029318436233229578,
"acc,exam_id__2011-03": 0.24242424242424243,
"acc_stderr,exam_id__2011-03": 0.024820925193381742,
"acc,exam_id__2017-22": 0.3,
"acc_stderr,exam_id__2017-22": 0.029529133997370303,
"acc,exam_id__2010-02": 0.25,
"acc_stderr,exam_id__2010-02": 0.02500977408052658,
"acc,exam_id__2012-08": 0.2,
"acc_stderr,exam_id__2012-08": 0.02583028493747631,
"acc,exam_id__2014-15": 0.19230769230769232,
"acc_stderr,exam_id__2014-15": 0.02584179623972344,
"acc,exam_id__2012-06": 0.2625,
"acc_stderr,exam_id__2012-06": 0.028334563759606888,
"acc,exam_id__2017-23": 0.275,
"acc_stderr,exam_id__2017-23": 0.028828971018211254,
"acc,exam_id__2018-25": 0.2,
"acc_stderr,exam_id__2018-25": 0.025903737673115802,
"alias": "oab_exams"
},
"portuguese_hate_speech": {
"alias": "portuguese_hate_speech_binary",
"f1_macro,all": 0.22986425339366515,
"f1_macro_stderr,all": 0.0065622482013892635,
"acc,all": 0.2984723854289072,
"acc_stderr,all": 0.011059935171647703
},
"tweetsentbr": {
"f1_macro,all": 0.16863215085413544,
"f1_macro_stderr,all": 0.003902572488556615,
"acc,all": 0.3009950248756219,
"acc_stderr,all": 0.007234369141653642,
"alias": "tweetsentbr"
}
},
"configs": {
"assin2_rte": {
"task": "assin2_rte",
"group": [
"pt_benchmark",
"assin2"
],
"dataset_path": "assin2",
"test_split": "test",
"fewshot_split": "train",
"doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
"doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
"description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "id_sampler",
"sampler_config": {
"id_list": [
1,
3251,
2,
3252,
3,
4,
5,
6,
3253,
7,
3254,
3255,
3256,
8,
9,
10,
3257,
11,
3258,
12,
13,
14,
15,
3259,
3260,
3261,
3262,
3263,
16,
17,
3264,
18,
3265,
3266,
3267,
19,
20,
3268,
3269,
21,
3270,
3271,
22,
3272,
3273,
23,
3274,
24,
25,
3275
],
"id_column": "sentence_pair_id"
}
},
"num_fewshot": 15,
"metric_list": [
{
"metric": "f1_macro",
"aggregation": "f1_macro",
"higher_is_better": true
},
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "find_similar_label",
"labels": [
"Sim",
"Não"
]
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"metadata": {
"version": 1.1
}
},
"assin2_sts": {
"task": "assin2_sts",
"group": [
"pt_benchmark",
"assin2"
],
"dataset_path": "assin2",
"test_split": "test",
"fewshot_split": "train",
"doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
"doc_to_target": "<function assin2_float_to_pt_str at 0x14bd9d57b740>",
"description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "id_sampler",
"sampler_config": {
"id_list": [
1,
3251,
2,
3252,
3,
4,
5,
6,
3253,
7,
3254,
3255,
3256,
8,
9,
10,
3257,
11,
3258,
12,
13,
14,
15,
3259,
3260,
3261,
3262,
3263,
16,
17,
3264,
18,
3265,
3266,
3267,
19,
20,
3268,
3269,
21,
3270,
3271,
22,
3272,
3273,
23,
3274,
24,
25,
3275
],
"id_column": "sentence_pair_id"
}
},
"num_fewshot": 10,
"metric_list": [
{
"metric": "pearson",
"aggregation": "pearsonr",
"higher_is_better": true
},
{
"metric": "mse",
"aggregation": "mean_squared_error",
"higher_is_better": false
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "number_filter",
"type": "float",
"range_min": 1.0,
"range_max": 5.0,
"on_outside_range": "clip",
"fallback": 5.0
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"metadata": {
"version": 1.1
}
},
"bluex": {
"task": "bluex",
"group": [
"pt_benchmark",
"vestibular"
],
"dataset_path": "eduagarcia-temp/BLUEX_without_images",
"test_split": "train",
"fewshot_split": "train",
"doc_to_text": "<function enem_doc_to_text at 0x14bd9d57ad40>",
"doc_to_target": "{{answerKey}}",
"description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "id_sampler",
"sampler_config": {
"id_list": [
"USP_2018_3",
"UNICAMP_2018_2",
"USP_2018_35",
"UNICAMP_2018_16",
"USP_2018_89"
],
"id_column": "id",
"exclude_from_task": true
}
},
"num_fewshot": 3,
"metric_list": [
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "normalize_spaces"
},
{
"function": "remove_accents"
},
{
"function": "find_choices",
"choices": [
"A",
"B",
"C",
"D",
"E"
],
"regex_patterns": [
"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
"\\b([ABCDE])\\.",
"\\b([ABCDE]) ?[.):-]",
"\\b([ABCDE])$",
"\\b([ABCDE])\\b"
]
},
{
"function": "take_first"
}
],
"group_by": {
"column": "exam_id"
}
}
],
"should_decontaminate": true,
"doc_to_decontamination_query": "<function enem_doc_to_text at 0x14bd9d57af20>",
"metadata": {
"version": 1.1
}
},
"enem_challenge": {
"task": "enem_challenge",
"task_alias": "enem",
"group": [
"pt_benchmark",
"vestibular"
],
"dataset_path": "eduagarcia/enem_challenge",
"test_split": "train",
"fewshot_split": "train",
"doc_to_text": "<function enem_doc_to_text at 0x14bd9d57b1a0>",
"doc_to_target": "{{answerKey}}",
"description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "id_sampler",
"sampler_config": {
"id_list": [
"2022_21",
"2022_88",
"2022_143"
],
"id_column": "id",
"exclude_from_task": true
}
},
"num_fewshot": 3,
"metric_list": [
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "normalize_spaces"
},
{
"function": "remove_accents"
},
{
"function": "find_choices",
"choices": [
"A",
"B",
"C",
"D",
"E"
],
"regex_patterns": [
"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
"\\b([ABCDE])\\.",
"\\b([ABCDE]) ?[.):-]",
"\\b([ABCDE])$",
"\\b([ABCDE])\\b"
]
},
{
"function": "take_first"
}
],
"group_by": {
"column": "exam_id"
}
}
],
"should_decontaminate": true,
"doc_to_decontamination_query": "<function enem_doc_to_text at 0x14bd9d57b380>",
"metadata": {
"version": 1.1
}
},
"faquad_nli": {
"task": "faquad_nli",
"group": [
"pt_benchmark"
],
"dataset_path": "ruanchaves/faquad-nli",
"test_split": "test",
"fewshot_split": "train",
"doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
"doc_to_target": "{{['Não', 'Sim'][label]}}",
"description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n",
"sampler_config": {
"fewshot_indices": [
1893,
949,
663,
105,
1169,
2910,
2227,
2813,
974,
558,
1503,
1958,
2918,
601,
1560,
984,
2388,
995,
2233,
1982,
165,
2788,
1312,
2285,
522,
1113,
1670,
323,
236,
1263,
1562,
2519,
1049,
432,
1167,
1394,
2022,
2551,
2194,
2187,
2282,
2816,
108,
301,
1185,
1315,
1420,
2436,
2322,
766
]
}
},
"num_fewshot": 15,
"metric_list": [
{
"metric": "f1_macro",
"aggregation": "f1_macro",
"higher_is_better": true
},
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "find_similar_label",
"labels": [
"Sim",
"Não"
]
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"metadata": {
"version": 1.1
}
},
"hatebr_offensive": {
"task": "hatebr_offensive",
"task_alias": "hatebr_offensive_binary",
"group": [
"pt_benchmark"
],
"dataset_path": "eduagarcia/portuguese_benchmark",
"dataset_name": "HateBR_offensive_binary",
"test_split": "test",
"fewshot_split": "train",
"doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
"doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
"description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "id_sampler",
"sampler_config": {
"id_list": [
48,
44,
36,
20,
3511,
88,
3555,
16,
56,
3535,
60,
40,
3527,
4,
76,
3579,
3523,
3551,
68,
3503,
84,
3539,
64,
3599,
80,
3563,
3559,
3543,
3547,
3587,
3595,
3575,
3567,
3591,
24,
96,
92,
3507,
52,
72,
8,
3571,
3515,
3519,
3531,
28,
32,
0,
12,
3583
],
"id_column": "idx"
}
},
"num_fewshot": 25,
"metric_list": [
{
"metric": "f1_macro",
"aggregation": "f1_macro",
"higher_is_better": true
},
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "find_similar_label",
"labels": [
"Sim",
"Não"
]
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"oab_exams": {
"task": "oab_exams",
"group": [
"legal_benchmark",
"pt_benchmark"
],
"dataset_path": "eduagarcia/oab_exams",
"test_split": "train",
"fewshot_split": "train",
"doc_to_text": "<function doc_to_text at 0x14bd9d57be20>",
"doc_to_target": "{{answerKey}}",
"description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "id_sampler",
"sampler_config": {
"id_list": [
"2010-01_1",
"2010-01_11",
"2010-01_13",
"2010-01_23",
"2010-01_26",
"2010-01_28",
"2010-01_38",
"2010-01_48",
"2010-01_58",
"2010-01_68",
"2010-01_76",
"2010-01_83",
"2010-01_85",
"2010-01_91",
"2010-01_99"
],
"id_column": "id",
"exclude_from_task": true
}
},
"num_fewshot": 3,
"metric_list": [
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "normalize_spaces"
},
{
"function": "remove_accents"
},
{
"function": "find_choices",
"choices": [
"A",
"B",
"C",
"D"
],
"regex_patterns": [
"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
"\\b([ABCD])\\.",
"\\b([ABCD]) ?[.):-]",
"\\b([ABCD])$",
"\\b([ABCD])\\b"
]
},
{
"function": "take_first"
}
],
"group_by": {
"column": "exam_id"
}
}
],
"should_decontaminate": true,
"doc_to_decontamination_query": "<function doc_to_text at 0x14bd9d3f00e0>",
"metadata": {
"version": 1.5
}
},
"portuguese_hate_speech": {
"task": "portuguese_hate_speech",
"task_alias": "portuguese_hate_speech_binary",
"group": [
"pt_benchmark"
],
"dataset_path": "eduagarcia/portuguese_benchmark",
"dataset_name": "Portuguese_Hate_Speech_binary",
"test_split": "test",
"fewshot_split": "train",
"doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
"doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
"description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "id_sampler",
"sampler_config": {
"id_list": [
52,
50,
39,
28,
3,
105,
22,
25,
60,
11,
66,
41,
9,
4,
91,
42,
7,
20,
76,
1,
104,
13,
67,
54,
97,
27,
24,
14,
16,
48,
53,
40,
34,
49,
32,
119,
114,
2,
58,
83,
18,
36,
5,
6,
10,
35,
38,
0,
21,
46
],
"id_column": "idx"
}
},
"num_fewshot": 25,
"metric_list": [
{
"metric": "f1_macro",
"aggregation": "f1_macro",
"higher_is_better": true
},
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "find_similar_label",
"labels": [
"Sim",
"Não"
]
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
},
"tweetsentbr": {
"task": "tweetsentbr",
"group": [
"pt_benchmark"
],
"dataset_path": "eduagarcia/tweetsentbr_fewshot",
"test_split": "test",
"fewshot_split": "train",
"doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
"doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
"description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
"target_delimiter": " ",
"fewshot_delimiter": "\n\n",
"fewshot_config": {
"sampler": "first_n"
},
"num_fewshot": 25,
"metric_list": [
{
"metric": "f1_macro",
"aggregation": "f1_macro",
"higher_is_better": true
},
{
"metric": "acc",
"aggregation": "acc",
"higher_is_better": true
}
],
"output_type": "generate_until",
"generation_kwargs": {
"max_gen_toks": 32,
"do_sample": false,
"temperature": 0.0,
"top_k": null,
"top_p": null,
"until": [
"\n\n"
]
},
"repeats": 1,
"filter_list": [
{
"name": "all",
"filter": [
{
"function": "find_similar_label",
"labels": [
"Positivo",
"Neutro",
"Negativo"
]
},
{
"function": "take_first"
}
]
}
],
"should_decontaminate": false,
"metadata": {
"version": 1.0
}
}
},
"versions": {
"assin2_rte": 1.1,
"assin2_sts": 1.1,
"bluex": 1.1,
"enem_challenge": 1.1,
"faquad_nli": 1.1,
"hatebr_offensive": 1.0,
"oab_exams": 1.5,
"portuguese_hate_speech": 1.0,
"tweetsentbr": 1.0
},
"n-shot": {
"assin2_rte": 15,
"assin2_sts": 10,
"bluex": 3,
"enem_challenge": 3,
"faquad_nli": 15,
"hatebr_offensive": 25,
"oab_exams": 3,
"portuguese_hate_speech": 25,
"tweetsentbr": 25
},
"model_meta": {
"truncated": 2,
"non_truncated": 14148,
"padded": 0,
"non_padded": 14150,
"fewshots_truncated": 4,
"has_chat_template": false,
"chat_type": null,
"n_gpus": 1,
"accelerate_num_process": null,
"model_sha": "None",
"model_dtype": "torch.bfloat16",
"model_memory_footprint": 324836480,
"model_num_parameters": 162417408,
"model_is_loaded_in_4bit": null,
"model_is_loaded_in_8bit": null,
"model_is_quantized": null,
"model_device": "cuda:0",
"batch_size": 32,
"max_length": 2048,
"max_ctx_length": 2016,
"max_gen_toks": 32
},
"task_model_meta": {
"assin2_rte": {
"sample_size": 2448,
"truncated": 0,
"non_truncated": 2448,
"padded": 0,
"non_padded": 2448,
"fewshots_truncated": 0,
"mean_seq_length": 924.4232026143791,
"min_seq_length": 909,
"max_seq_length": 963,
"max_ctx_length": 2016,
"max_gen_toks": 32,
"mean_original_fewshots_size": 15.0,
"mean_effective_fewshot_size": 15.0
},
"assin2_sts": {
"sample_size": 2448,
"truncated": 0,
"non_truncated": 2448,
"padded": 0,
"non_padded": 2448,
"fewshots_truncated": 0,
"mean_seq_length": 659.4232026143791,
"min_seq_length": 644,
"max_seq_length": 698,
"max_ctx_length": 2016,
"max_gen_toks": 32,
"mean_original_fewshots_size": 10.0,
"mean_effective_fewshot_size": 10.0
},
"bluex": {
"sample_size": 719,
"truncated": 0,
"non_truncated": 719,
"padded": 0,
"non_padded": 719,
"fewshots_truncated": 0,
"mean_seq_length": 1170.817802503477,
"min_seq_length": 904,
"max_seq_length": 1801,
"max_ctx_length": 2016,
"max_gen_toks": 32,
"mean_original_fewshots_size": 3.0,
"mean_effective_fewshot_size": 3.0
},
"enem_challenge": {
"sample_size": 1429,
"truncated": 2,
"non_truncated": 1427,
"padded": 0,
"non_padded": 1429,
"fewshots_truncated": 4,
"mean_seq_length": 1007.4177746675997,
"min_seq_length": 829,
"max_seq_length": 2484,
"max_ctx_length": 2016,
"max_gen_toks": 32,
"mean_original_fewshots_size": 3.0,
"mean_effective_fewshot_size": 2.9972008397480754
},
"faquad_nli": {
"sample_size": 650,
"truncated": 0,
"non_truncated": 650,
"padded": 0,
"non_padded": 650,
"fewshots_truncated": 0,
"mean_seq_length": 968.1338461538462,
"min_seq_length": 936,
"max_seq_length": 1034,
"max_ctx_length": 2016,
"max_gen_toks": 32,
"mean_original_fewshots_size": 15.0,
"mean_effective_fewshot_size": 15.0
},
"hatebr_offensive": {
"sample_size": 1400,
"truncated": 0,
"non_truncated": 1400,
"padded": 0,
"non_padded": 1400,
"fewshots_truncated": 0,
"mean_seq_length": 867.4407142857143,
"min_seq_length": 852,
"max_seq_length": 1061,
"max_ctx_length": 2016,
"max_gen_toks": 32,
"mean_original_fewshots_size": 25.0,
"mean_effective_fewshot_size": 25.0
},
"oab_exams": {
"sample_size": 2195,
"truncated": 0,
"non_truncated": 2195,
"padded": 0,
"non_padded": 2195,
"fewshots_truncated": 0,
"mean_seq_length": 832.024145785877,
"min_seq_length": 659,
"max_seq_length": 1108,
"max_ctx_length": 2016,
"max_gen_toks": 32,
"mean_original_fewshots_size": 3.0,
"mean_effective_fewshot_size": 3.0
},
"portuguese_hate_speech": {
"sample_size": 851,
"truncated": 0,
"non_truncated": 851,
"padded": 0,
"non_padded": 851,
"fewshots_truncated": 0,
"mean_seq_length": 1219.021151586369,
"min_seq_length": 1192,
"max_seq_length": 1255,
"max_ctx_length": 2016,
"max_gen_toks": 32,
"mean_original_fewshots_size": 25.0,
"mean_effective_fewshot_size": 25.0
},
"tweetsentbr": {
"sample_size": 2010,
"truncated": 0,
"non_truncated": 2010,
"padded": 0,
"non_padded": 2010,
"fewshots_truncated": 0,
"mean_seq_length": 1154.4194029850746,
"min_seq_length": 1137,
"max_seq_length": 1211,
"max_ctx_length": 2016,
"max_gen_toks": 32,
"mean_original_fewshots_size": 25.0,
"mean_effective_fewshot_size": 25.0
}
},
"config": {
"model": "huggingface",
"model_args": "pretrained=/lustre/mlnvme/data/asen_hpc-mula/checkpoints-llama/slurm_job_17029745/step_320000",
"batch_size": "auto",
"batch_sizes": [],
"device": "cuda:0",
"use_cache": null,
"limit": null,
"bootstrap_iters": 100000,
"gen_kwargs": null
},
"git_hash": null
}
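
The configuration entries above describe how each task is prompted, few-shot sampled, and scored. As a minimal sketch (assuming this file is saved locally as "results-pt.json"), the headline score per task can be read from the "results" block with the standard library alone; the metric picked for each task below mirrors the first entry of that task's metric_list:

import json

# First metric from each task's "metric_list" in the "configs" section above.
HEADLINE_METRIC = {
    "assin2_rte": "f1_macro,all",
    "assin2_sts": "pearson,all",
    "bluex": "acc,all",
    "enem_challenge": "acc,all",
    "faquad_nli": "f1_macro,all",
    "hatebr_offensive": "f1_macro,all",
    "oab_exams": "acc,all",
    "portuguese_hate_speech": "f1_macro,all",
    "tweetsentbr": "f1_macro,all",
}

with open("results-pt.json", encoding="utf-8") as f:
    data = json.load(f)

for task, metric in HEADLINE_METRIC.items():
    score = data["results"][task][metric]
    shots = data["n-shot"][task]
    print(f"{task:>24} ({shots:2d}-shot): {score:.4f}")

The bluex, enem_challenge, and oab_exams configs post-process each generation with a normalize_spaces / remove_accents / find_choices filter chain before grouping accuracy by exam_id. Those filters are implemented inside the evaluation harness itself; the sketch below is only an illustrative re-implementation of the cascade, using the exact regex_patterns listed in the bluex and enem_challenge configs (oab_exams uses the same patterns restricted to A-D) and approximating remove_accents with Unicode decomposition:

import re
import unicodedata

# Patterns are tried in order; the first capture wins (then "take_first").
PATTERNS = [
    r"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\b",
    r"\b([ABCDE])\.",
    r"\b([ABCDE]) ?[.):-]",
    r"\b([ABCDE])$",
    r"\b([ABCDE])\b",
]

def normalize(text: str) -> str:
    """Approximate the normalize_spaces and remove_accents filter steps."""
    text = " ".join(text.split())
    decomposed = unicodedata.normalize("NFKD", text)
    return "".join(c for c in decomposed if not unicodedata.combining(c))

def find_choice(generation: str) -> str | None:
    """Return the first choice letter captured by the pattern cascade, if any."""
    text = normalize(generation)
    for pattern in PATTERNS:
        match = re.search(pattern, text)
        if match:
            return match.group(1)
    return None

# The keyword-anchored first pattern fires before the bare-letter fallbacks,
# so the leading standalone "A" in the sentence is not mistaken for an answer.
print(find_choice("A resposta correta é a letra B."))  # expected: B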