# reference for length bias categories
length_categories = {
    'alpacaeval-easy': 'True',
    'alpacaeval-hard': 'True',
    'alpacaeval-length': 'Neutral',
    'donotanswer': 'False',
    'hep-cpp': 'Neutral',
    'hep-go': 'Neutral',
    'hep-java': 'Neutral',
    'hep-js': 'Neutral',
    'hep-python': 'Neutral',
    'hep-rust': 'Neutral',
    'llmbar-adver-GPTInst': 'False',
    'llmbar-adver-GPTOut': 'Neutral',
    'llmbar-adver-manual': 'False',
    'llmbar-adver-neighbor': 'False',
    'llmbar-natural': 'Neutral',
    'math-prm': 'Neutral',
    'mt-bench-easy': 'False',
    'mt-bench-hard': 'False',
    'mt-bench-med': 'Neutral',
    'refusals-dangerous': 'False',
    'refusals-offensive': 'False',
    'xstest-should-refuse': 'False',
    'xstest-should-respond': 'True'
}

# number of examples per subset; used as weights when aggregating subset scores
example_counts = {
    "alpacaeval-easy": 100,
    "alpacaeval-length": 95,
    "alpacaeval-hard": 95,
    "mt-bench-easy": 28,
    "mt-bench-med": 40,
    "mt-bench-hard": 37,
    "math-prm": 984, # actual length 447, upweighting to be equal to code
    "refusals-dangerous": 100,
    "refusals-offensive": 100,
    "llmbar-natural": 100,
    "llmbar-adver-neighbor": 134,
    "llmbar-adver-GPTInst": 92,
    "llmbar-adver-GPTOut": 47,
    "llmbar-adver-manual": 46,
    "xstest-should-refuse": 250,
    "xstest-should-respond": 154,
    "donotanswer": 136,
    "hep-cpp": 164,
    "hep-go": 164,
    "hep-java": 164,
    "hep-js": 164,
    "hep-python": 164,
    "hep-rust": 164
}

# grouping of subsets into the top-level sections
subset_mapping = {
    "Chat": ["alpacaeval-easy", "alpacaeval-length", "alpacaeval-hard", "mt-bench-easy", "mt-bench-med"],
    "Chat Hard": ["mt-bench-hard", "llmbar-natural", "llmbar-adver-neighbor", "llmbar-adver-GPTInst", "llmbar-adver-GPTOut", "llmbar-adver-manual"],
    "Safety": ["refusals-dangerous", "refusals-offensive", "xstest-should-refuse", "xstest-should-respond", "donotanswer"],
    "Reasoning": ["math-prm", 
                  "hep-cpp", "hep-go", "hep-java", "hep-js", "hep-python", "hep-rust"]
}
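
# NOTE: the helper below is an illustrative sketch, not part of the original
# constants file. It shows one way these mappings are typically combined:
# per-subset accuracies are averaged within each section of `subset_mapping`,
# weighted by `example_counts`. The name `section_scores` and the
# `subset_accuracies` argument are assumptions for this example.
def section_scores(subset_accuracies):
    """Compute a weighted mean accuracy per section.

    subset_accuracies: dict mapping each subset name (e.g. 'alpacaeval-easy')
    to its accuracy in [0, 1]; every subset listed in `subset_mapping` is
    expected to be present.
    """
    scores = {}
    for section, subsets in subset_mapping.items():
        total = sum(example_counts[s] for s in subsets)
        weighted = sum(example_counts[s] * subset_accuracies[s] for s in subsets)
        scores[section] = weighted / total
    return scores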