feat: update models and subclass #1
opened by Jerry0723

Files changed:
- app.py +2 -2
- changelog.md +7 -0
- data/chinese_benchmark_gen.csv +3 -0
- data/chinese_benchmark_per.csv +3 -0
- data/subclass_gen.csv +3 -0
- data/subclass_per.csv +3 -0
app.py
CHANGED
@@ -18,7 +18,7 @@ METRICS = ["Accuracy", "Precision_Unsafe", "Recall_Unsafe", "Precision_Safe", "R
 SUBCLASS = ["Discrimination", "Variant", "Psychology", "Politics", "Eroticism", "Vulgarity", "Property", "Injury", "Criminality", "Ethics"]
 
 #SPLITS = ["Overall", "Subclass"]
-SPLITS = ["Overall", "Variant", "Psychology", "Politics", "Eroticism", "Vulgarity", "Property", "Injury", "Criminality", "Ethics"]
+SPLITS = ["Overall", "Discrimination", "Variant", "Psychology", "Politics", "Eroticism", "Vulgarity", "Property", "Injury", "Criminality", "Ethics"]
 
 CLASSIFICATION = {
     "model_size": [
@@ -46,7 +46,7 @@ _BIBTEX = """
 }
 """
 
-_LAST_UPDATED = "
+_LAST_UPDATED = "November 18, 2024"
 
 banner_url = "./assets/logo.png"
 _BANNER = f'<div style="display: flex; justify-content: space-around;"><img src="{banner_url}" alt="Banner" style="width: 40vw; min-width: 300px; max-width: 600px;"> </div>'  # noqa
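The functional change here is that the old SPLITS list omitted "Discrimination" even though it appears in SUBCLASS, so that subclass could never be selected in the UI. For context, here is a minimal sketch of how a Gradio leaderboard Space typically wires SPLITS into one tab per split; the loader, the whitespace delimiter, and the layout below are assumptions for illustration, not this Space's actual code:

# Minimal sketch (assumed, not this Space's code): one leaderboard tab per
# entry in SPLITS, so a split missing from the list simply never renders.
import pandas as pd
import gradio as gr

SPLITS = ["Overall", "Discrimination", "Variant", "Psychology", "Politics",
          "Eroticism", "Vulgarity", "Property", "Injury", "Criminality", "Ethics"]

def load_split(split: str) -> pd.DataFrame:
    # Hypothetical loader: "Overall" reads the benchmark table (delimiter
    # assumed to be whitespace from the rows in this diff); any other split
    # pulls that subclass's columns from the wide subclass table.
    if split == "Overall":
        return pd.read_csv("data/chinese_benchmark_gen.csv", sep=r"\s+")
    df = pd.read_csv("data/subclass_gen.csv")
    keep = ["Model", "Size"] + [c for c in df.columns if c.startswith(split + "_")]
    return df[keep]

with gr.Blocks() as demo:
    for split in SPLITS:
        with gr.Tab(split):
            gr.Dataframe(load_split(split))

demo.launch()

Under a pattern like this, adding "Discrimination" to SPLITS is all it takes for the new subclass tab to appear.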
changelog.md
CHANGED
@@ -12,3 +12,10 @@ version: v1.0.1
 
 changed:
 - [1]feat: add citation
+
+### 2024-11-18
+version: v1.0.2
+
+changed:
+- [1]feat: add three models: Qwen2.5-72B, Qwen2.5-32B, Qwen2-72B
+- [2]feat: add subclass: Discrimination
data/chinese_benchmark_gen.csv
CHANGED
@@ -2,7 +2,10 @@ Model Size Accuracy/std Precision_Unsafe/std Recall_Unsafe/std Precision_Safe/st
 DeepSeek-LLM-67B-Chat >65B 76.76/0.35 73.40/0.37 84.26/0.40 81.34/0.35 69.19/0.64
 Llama3-ChatQA-1.5-70B >65B 65.29/0.29 66.24/0.50 62.92/0.12 64.43/0.19 67.69/0.63
 Qwen1.5-72B-Chat >65B 62.91/0.50 73.86/0.84 40.46/0.97 58.75/0.35 85.55/0.62
+Qwen2.5-72B >65B 58.00/0.12 65.34/0.26 34.86/0.48 55.31/0.06 81.35/0.14
 Opt-66B >65B 54.46/0.17 53.22/0.06 76.94/0.24 57.73/0.49 31.77/0.28
+Qwen2-72B >65B 52.21/0.52 54.27/1.09 30.79/0.50 51.39/0.31 73.82/0.63
+Qwen2.5-32B ~30B 63.01/0.16 77.14/0.40 37.45/0.40 58.46/0.05 88.80/0.11
 Yi-1.5-34B-Chat ~30B 60.06/0.43 58.14/0.40 72.51/0.55 63.27/0.56 47.56/0.42
 Opt-30B ~30B 50.88/0.11 50.76/0.12 72.95/0.16 51.18/0.26 28.62/0.28
 InternLM2-Chat-20B 10B~20B 70.21/0.55 73.30/0.70 63.79/0.43 67.82/0.45 76.65/0.67
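Each metric cell in these benchmark tables packs a mean and a standard deviation as mean/std (e.g. 76.76/0.35). A sketch of how such cells can be split into numeric columns for sorting; the whitespace delimiter is inferred from the rows above and is an assumption:

# Sketch: split the "mean/std" cells of chinese_benchmark_gen.csv into
# numeric columns so the table can be sorted by mean. Whitespace
# delimiting is an assumption based on the rows shown in this diff.
import pandas as pd

df = pd.read_csv("data/chinese_benchmark_gen.csv", sep=r"\s+")

for col in df.columns:
    if col.endswith("/std"):
        metric = col[:-len("/std")]
        parts = df[col].str.split("/", expand=True).astype(float)
        df[metric], df[metric + "_std"] = parts[0], parts[1]
        df = df.drop(columns=col)

# e.g. rank models by mean accuracy, highest first
print(df.sort_values("Accuracy", ascending=False)[["Model", "Size", "Accuracy"]])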
data/chinese_benchmark_per.csv
CHANGED
@@ -2,9 +2,12 @@ Model Size Accuracy/std Precision_Unsafe/std Recall_Unsafe/std Precision_Safe/st
 DeepSeek-LLM-67B-Chat >65B 68.08/0.35 94.80/0.83 38.40/0.43 61.27/0.26 97.88/0.36
 Qwen1.5-72B-Chat >65B 63.67/0.46 58.27/0.32 96.84/0.13 90.51/0.57 30.34/0.80
 Opt-66B >65B 59.93/0.41 56.52/0.37 86.87/0.59 71.36/0.78 32.86/0.74
+Qwen2-72B >65B 51.09/0.17 50.61/0.15 99.91/0.06 95.88/2.24 2.04/0.22
+Qwen2.5-72B >65B 49.68/0.15 49.90/0.14 99.00/0.17 13.24/3.84 0.15/0.02
 Llama3-ChatQA-1.5-70B >65B 40.41/0.29 33.86/0.75 19.84/0.75 43.13/0.25 61.08/0.37
 Yi-1.5-34B-Chat ~30B 66.02/0.22 80.13/0.55 42.82/0.25 60.86/0.16 89.33/0.41
 Opt-30B ~30B 53.82/0.03 54.42/0.21 48.32/0.20 53.34/0.11 59.34/0.27
+Qwen2.5-32B ~30B 51.94/0.24 51.05/0.19 99.42/0.11 87.97/2.00 4.23/0.22
 Baichuan2-13B-Chat 10B~20B 70.43/0.39 65.81/0.38 85.34/0.63 79.02/0.63 55.46/0.47
 Qwen1.5-14B 10B~20B 61.29/0.40 57.02/0.32 92.43/0.55 79.80/1.05 30.02/0.47
 Ziya2-13B-Chat 10B~20B 55.25/0.26 59.24/0.37 34.30/0.11 53.61/0.26 76.29/0.39
data/subclass_gen.csv
CHANGED
@@ -1,9 +1,12 @@
 Model,Size,Discrimination_Accuracy,Discrimination_Precision,Discrimination_Recall,Variant_Accuracy,Variant_Precision,Variant_Recall,Psychology_Accuracy,Psychology_Precision,Psychology_Recall,Politics_Accuracy,Politics_Precision,Politics_Recall,Eroticism_Accuracy,Eroticism_Precision,Eroticism_Recall,Vulgarity_Accuracy,Vulgarity_Precision,Vulgarity_Recall,Property_Accuracy,Property_Precision,Property_Recall,Injury_Accuracy,Injury_Precision,Injury_Recall,Criminality_Accuracy,Criminality_Precision,Criminality_Recall,Ethics_Accuracy,Ethics_Precision,Ethics_Recall
 DeepSeek-LLM-67B-Chat,>65B,0.7897,0.7454,0.8652,0.8482,0.7832,0.9726,0.6603,0.6751,0.6011,0.8344,0.7978,0.932,0.8367,0.78,0.9497,0.8449,0.769,0.9767,0.7985,0.7493,0.8825,0.6171,0.6366,0.5125,0.8258,0.7583,0.9401,0.7387,0.7276,0.7596
 Qwen1.5-72B-Chat,>65B,0.5998,0.693,0.3298,0.8005,0.8477,0.7444,0.4697,0.3314,0.0703,0.6671,0.812,0.506,0.7676,0.8369,0.6803,0.7069,0.7895,0.5476,0.5825,0.6666,0.2918,0.4697,0.3186,0.0668,0.7076,0.7867,0.546,0.5283,0.5803,0.1942
+Qwen2.5-72B,>65B,0.5622,0.6088,0.2926,0.7066,0.7771,0.5982,0.4387,0.2045,0.0474,0.5748,0.7101,0.3694,0.6984,0.773,0.5824,0.713,0.7627,0.6019,0.5482,0.5828,0.2633,0.4766,0.3937,0.1209,0.6552,0.7153,0.4801,0.4594,0.346,0.096
+Qwen2-72B,>65B,0.5206,0.5178,0.2928,0.5984,0.6507,0.465,0.4233,0.2721,0.0999,0.5126,0.5972,0.3239,0.609,0.6613,0.4863,0.6546,0.6757,0.567,0.4871,0.4511,0.2244,0.4341,0.3064,0.1193,0.5419,0.5457,0.3335,0.4394,0.346,0.1385
 Opt-66B,>65B,0.4866,0.482,0.682,0.5174,0.5203,0.7258,0.5579,0.5338,0.8237,0.5646,0.5728,0.7868,0.5385,0.535,0.7659,0.5571,0.5309,0.8257,0.5414,0.5199,0.7954,0.5354,0.5181,0.7801,0.5376,0.515,0.7909,0.5079,0.5041,0.7185
 Llama3-ChatQA-1.5-70B,>65B,0.6682,0.6617,0.6566,0.6859,0.6932,0.6922,0.6079,0.6187,0.5348,0.6548,0.7024,0.6342,0.6861,0.6945,0.6928,0.7029,0.6853,0.7281,0.6211,0.6242,0.5599,0.6105,0.6189,0.5397,0.7134,0.6873,0.7493,0.59,0.6072,0.4996
 Yi-1.5-34B-Chat,~30B,0.66,0.6114,0.8339,0.7311,0.6644,0.9577,0.3309,0.2379,0.1626,0.6958,0.6708,0.8646,0.7046,0.6528,0.9053,0.7084,0.6383,0.9309,0.5928,0.5672,0.6961,0.4467,0.4308,0.3972,0.6956,0.6281,0.9097,0.5182,0.515,0.5425
+Qwen2.5-32B,~30B,0.6452,0.7778,0.3845,0.7079,0.841,0.5291,0.5733,0.6924,0.2427,0.5968,0.7977,0.3472,0.6925,0.833,0.5,0.7265,0.8351,0.5509,0.608,0.7347,0.3069,0.6013,0.7316,0.2971,0.6815,0.8033,0.4554,0.5108,0.5397,0.1237
 Opt-30B,~30B,0.4672,0.4683,0.6648,0.5002,0.5082,0.7109,0.5044,0.4987,0.7354,0.5314,0.5517,0.7422,0.5108,0.5163,0.7304,0.5161,0.5039,0.7618,0.513,0.5009,0.7578,0.4956,0.4908,0.719,0.5119,0.4977,0.7583,0.4958,0.4955,0.7134
 Baichuan2-13B-Chat,10B~20B,0.6337,0.6402,0.5755,0.7188,0.7164,0.7457,0.5185,0.5189,0.3417,0.7341,0.7487,0.7703,0.7033,0.7091,0.7143,0.6742,0.6712,0.6575,0.5657,0.5728,0.434,0.6151,0.6264,0.5371,0.6515,0.65,0.6089,0.5532,0.5707,0.414
 Qwen1.5-14B,10B~20B,0.7099,0.6657,0.8141,0.7897,0.7205,0.9615,0.5669,0.5657,0.5226,0.7776,0.7373,0.9181,0.7571,0.7073,0.897,0.7862,0.7044,0.97,0.6421,0.6225,0.6757,0.5014,0.4893,0.3888,0.7563,0.6869,0.9116,0.5499,0.5538,0.4889
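The subclass files are wide comma-separated tables with one Accuracy/Precision/Recall triple per subclass, which is presumably what a per-subclass split such as the new "Discrimination" tab renders. A sketch of extracting one subclass's view; the column naming follows the header in this diff, everything else is assumed:

# Sketch: pull one subclass's Accuracy/Precision/Recall columns from the
# wide subclass_gen.csv, roughly what a per-subclass split tab would show.
import pandas as pd

df = pd.read_csv("data/subclass_gen.csv")

def subclass_view(df: pd.DataFrame, subclass: str) -> pd.DataFrame:
    cols = [f"{subclass}_{m}" for m in ("Accuracy", "Precision", "Recall")]
    out = df[["Model", "Size"] + cols].copy()
    out.columns = ["Model", "Size", "Accuracy", "Precision", "Recall"]
    return out.sort_values("Accuracy", ascending=False)

print(subclass_view(df, "Discrimination"))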
data/subclass_per.csv
CHANGED
@@ -1,9 +1,12 @@
 Model,Size,Discrimination_Accuracy,Discrimination_Precision,Discrimination_Recall,Variant_Accuracy,Variant_Precision,Variant_Recall,Psychology_Accuracy,Psychology_Precision,Psychology_Recall,Politics_Accuracy,Politics_Precision,Politics_Recall,Eroticism_Accuracy,Eroticism_Precision,Eroticism_Recall,Vulgarity_Accuracy,Vulgarity_Precision,Vulgarity_Recall,Property_Accuracy,Property_Precision,Property_Recall,Injury_Accuracy,Injury_Precision,Injury_Recall,Criminality_Accuracy,Criminality_Precision,Criminality_Recall,Ethics_Accuracy,Ethics_Precision,Ethics_Recall
 DeepSeek-LLM-67B-Chat,>65B,0.6948,0.9451,0.3989,0.6447,0.9375,0.3259,0.5122,0.5824,0.033,0.7673,0.9695,0.5903,0.6865,0.9516,0.4092,0.899,0.9725,0.8159,0.66,0.9341,0.326,0.5479,0.8184,0.1017,0.8777,0.9706,0.7709,0.5142,0.6736,0.0456
 Qwen1.5-72B-Chat,>65B,0.6479,0.581,0.9985,0.6609,0.6019,0.9938,0.6472,0.5837,0.9906,0.5928,0.5895,0.8276,0.6544,0.5996,0.9796,0.6488,0.5823,0.9987,0.6448,0.5792,0.9932,0.6255,0.5712,0.9493,0.6433,0.5763,0.9951,0.6485,0.5872,0.9874
+Qwen2.5-72B,>65B,0.4889,0.4886,0.9993,0.5119,0.5114,1.0,0.465,0.4783,0.9413,0.5446,0.5442,1.0,0.5132,0.5128,1.0,0.4904,0.4899,1.0,0.4834,0.4855,0.9892,0.4779,0.4841,0.9716,0.4852,0.4847,1.0,0.4958,0.4966,0.9947
+Qwen2-72B,>65B,0.5005,0.4943,1.0,0.5227,0.517,1.0,0.5051,0.499,1.0,0.5511,0.5481,0.9935,0.5238,0.5182,0.9994,0.5017,0.4955,1.0,0.5,0.4938,1.0,0.5027,0.4967,0.9993,0.4967,0.4903,1.0,0.5096,0.5035,1.0
 Opt-66B,>65B,0.645,0.5831,0.9572,0.3981,0.417,0.4471,0.6667,0.5971,0.9953,0.6232,0.6095,0.8551,0.4854,0.4984,0.6176,0.652,0.5874,0.9698,0.6511,0.5859,0.9706,0.6604,0.5926,0.9853,0.6556,0.586,0.9846,0.655,0.5943,0.9665
 Llama3-ChatQA-1.5-70B,>65B,0.3666,0.2082,0.1069,0.339,0.169,0.0752,0.3147,0.0148,0.0059,0.2947,0.075,0.0261,0.7758,0.7167,0.9293,0.5528,0.5482,0.4877,0.3396,0.111,0.0507,0.3207,0.0374,0.0156,0.4392,0.3806,0.2524,0.3214,0.0614,0.0253
 Yi-1.5-34B-Chat,~30B,0.7139,0.8341,0.5176,0.7722,0.8735,0.6482,0.475,0.2581,0.0357,0.7162,0.8717,0.5603,0.6206,0.7912,0.353,0.8816,0.8938,0.8601,0.6412,0.7813,0.3672,0.497,0.4306,0.0769,0.8472,0.8832,0.7889,0.4818,0.3646,0.0576
+Qwen2.5-32B,~30B,0.5101,0.4992,0.998,0.5328,0.5224,1.0,0.5114,0.5022,0.9914,0.5642,0.5551,1.0,0.5341,0.5237,1.0,0.5124,0.5009,1.0,0.5047,0.496,0.9876,0.5024,0.4964,0.9768,0.5073,0.4957,1.0,0.513,0.5054,0.986
 Opt-30B,~30B,0.5831,0.5754,0.5565,0.3952,0.338,0.1915,0.6784,0.6507,0.7506,0.5798,0.6281,0.5559,0.357,0.2405,0.1185,0.406,0.3224,0.1945,0.6203,0.6061,0.633,0.6188,0.6076,0.6293,0.6031,0.5886,0.5976,0.6244,0.6184,0.6415
 Baichuan2-13B-Chat,10B~20B,0.7346,0.6715,0.8932,0.7703,0.7043,0.9491,0.6303,0.6129,0.6785,0.7435,0.7152,0.8777,0.779,0.7088,0.9649,0.7677,0.6883,0.9601,0.6763,0.6388,0.7738,0.6359,0.6149,0.6904,0.7096,0.6554,0.8436,0.7306,0.6762,0.8788
 Qwen1.5-14B,10B~20B,0.625,0.5683,0.964,0.6549,0.5977,0.9932,0.5983,0.5571,0.9038,0.6561,0.6193,0.9535,0.6592,0.6005,0.9994,0.6382,0.5759,0.9897,0.5579,0.53,0.8275,0.5009,0.4938,0.7077,0.6256,0.566,0.9705,0.6063,0.5643,0.914