Fixing some errors in the leaderboard evaluation results in the ModelCard YAML

#2
Files changed (1) hide show
  1. README.md +27 -1
README.md CHANGED
@@ -80,6 +80,19 @@ model-index:
80
  - type: f1_macro
81
  value: 68.1
82
  name: f1-macro
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  - type: pearson
84
  value: 30.57
85
  name: pearson
@@ -107,7 +120,7 @@ model-index:
107
  name: Text Generation
108
  dataset:
109
  name: HateBR Binary
110
- type: eduagarcia/portuguese_benchmark
111
  split: test
112
  args:
113
  num_few_shot: 25
@@ -115,6 +128,19 @@ model-index:
115
  - type: f1_macro
116
  value: 60.51
117
  name: f1-macro
 
 
 
 
 
 
 
 
 
 
 
 
 
118
  - type: f1_macro
119
  value: 54.6
120
  name: f1-macro
 
80
  - type: f1_macro
81
  value: 68.1
82
  name: f1-macro
83
+ source:
84
+ url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=recogna-nlp/Phi-Bode
85
+ name: Open Portuguese LLM Leaderboard
86
+ - task:
87
+ type: text-generation
88
+ name: Text Generation
89
+ dataset:
90
+ name: Assin2 STS
91
+ type: eduagarcia/portuguese_benchmark
92
+ split: test
93
+ args:
94
+ num_few_shot: 15
95
+ metrics:
96
  - type: pearson
97
  value: 30.57
98
  name: pearson
 
120
  name: Text Generation
121
  dataset:
122
  name: HateBR Binary
123
+ type: ruanchaves/hatebr
124
  split: test
125
  args:
126
  num_few_shot: 25
 
128
  - type: f1_macro
129
  value: 60.51
130
  name: f1-macro
131
+ source:
132
+ url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=recogna-nlp/Phi-Bode
133
+ name: Open Portuguese LLM Leaderboard
134
+ - task:
135
+ type: text-generation
136
+ name: Text Generation
137
+ dataset:
138
+ name: PT Hate Speech Binary
139
+ type: hate_speech_portuguese
140
+ split: test
141
+ args:
142
+ num_few_shot: 25
143
+ metrics:
144
  - type: f1_macro
145
  value: 54.6
146
  name: f1-macro