# reward-bench/src/constants.py
# Reference for length-bias categories. Values are strings ("True", "False",
# "Neutral") indicating whether a subset's chosen responses tend to be longer
# than, shorter than, or about the same length as the rejected ones.
length_categories = {
    "alpacaeval-easy": "True",
    "alpacaeval-hard": "True",
    "alpacaeval-length": "Neutral",
    "donotanswer": "False",
    "hep-cpp": "Neutral",
    "hep-go": "Neutral",
    "hep-java": "Neutral",
    "hep-js": "Neutral",
    "hep-python": "Neutral",
    "hep-rust": "Neutral",
    "llmbar-adver-GPTInst": "False",
    "llmbar-adver-GPTOut": "Neutral",
    "llmbar-adver-manual": "False",
    "llmbar-adver-neighbor": "False",
    "llmbar-natural": "Neutral",
    "math-prm": "Neutral",
    "mt-bench-easy": "False",
    "mt-bench-hard": "False",
    "mt-bench-med": "Neutral",
    "refusals-dangerous": "False",
    "refusals-offensive": "False",
    "xstest-should-refuse": "False",
    "xstest-should-respond": "True",
}
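
# A hypothetical usage sketch (not part of the original file): invert the
# mapping above to group subsets by their length-bias label, e.g. to check
# whether a reward model's accuracy correlates with response length.
from collections import defaultdict


def subsets_by_length_bias() -> dict[str, list[str]]:
    """Group subset names by their length-bias label ("True"/"False"/"Neutral")."""
    grouped = defaultdict(list)
    for subset, bias in length_categories.items():
        grouped[bias].append(subset)
    return dict(grouped)
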
example_counts = {
"alpacaeval-easy": 100,
"alpacaeval-length": 95,
"alpacaeval-hard": 95,
"mt-bench-easy": 28,
"mt-bench-med": 40,
"mt-bench-hard": 37,
"math-prm": 984, # actual length 447, upweighting to be equal to code
"refusals-dangerous": 100,
"refusals-offensive": 100,
"llmbar-natural": 100,
"llmbar-adver-neighbor": 134,
"llmbar-adver-GPTInst": 92,
"llmbar-adver-GPTOut": 47,
"llmbar-adver-manual": 46,
"xstest-should-refuse": 250,
"xstest-should-respond": 154,
"donotanswer": 136,
"hep-cpp": 164,
"hep-go": 164,
"hep-java": 164,
"hep-js": 164,
"hep-python": 164,
"hep-rust": 164
}
# Note: this order should match the dataframe.
subset_mapping = {
"Chat": ['alpacaeval-easy', 'alpacaeval-hard', 'alpacaeval-length', 'mt-bench-easy', 'mt-bench-med'],
"Chat Hard": ['llmbar-adver-GPTInst', 'llmbar-adver-GPTOut', 'llmbar-adver-manual', 'llmbar-adver-neighbor', 'llmbar-natural', 'mt-bench-hard'],
"Safety": ['donotanswer', 'refusals-dangerous', 'refusals-offensive', 'xstest-should-refuse', 'xstest-should-respond'],
"Reasoning": ["hep-cpp", "hep-go", "hep-java", "hep-js", "hep-python", "hep-rust", "math-prm"]
}
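
# A minimal sketch (not part of the original file) of how these constants fit
# together: each section score is the example-count-weighted average of its
# subset accuracies, which is why math-prm is upweighted to balance the six
# HEP code subsets. `subset_accuracies` is a hypothetical input mapping each
# subset name to a reward model's accuracy on it.
def weighted_section_scores(subset_accuracies: dict[str, float]) -> dict[str, float]:
    scores = {}
    for section, subsets in subset_mapping.items():
        total = sum(example_counts[s] for s in subsets)
        # Weight each subset's accuracy by its (possibly upweighted) example count.
        scores[section] = (
            sum(subset_accuracies[s] * example_counts[s] for s in subsets) / total
        )
    return scores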