# reference for length bias categories
# ('True' = chosen responses tend to be longer than rejected ones,
#  'False' = shorter, 'Neutral' = no consistent length skew)
length_categories = {
    'alpacaeval-easy': 'True',
    'alpacaeval-hard': 'True',
    'alpacaeval-length': 'Neutral',
    'donotanswer': 'False',
    'hep-cpp': 'Neutral',
    'hep-go': 'Neutral',
    'hep-java': 'Neutral',
    'hep-js': 'Neutral',
    'hep-python': 'Neutral',
    'hep-rust': 'Neutral',
    'llmbar-adver-GPTInst': 'False',
    'llmbar-adver-GPTOut': 'Neutral',
    'llmbar-adver-manual': 'False',
    'llmbar-adver-neighbor': 'False',
    'llmbar-natural': 'Neutral',
    'math-prm': 'Neutral',
    'mt-bench-easy': 'False',
    'mt-bench-hard': 'False',
    'mt-bench-med': 'Neutral',
    'refusals-dangerous': 'False',
    'refusals-offensive': 'False',
    'xstest-should-refuse': 'False',
    'xstest-should-respond': 'True'
}
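
# Illustrative helper (assumed, not part of the original file): invert the
# mapping above so subsets can be grouped by their length-bias label, e.g.
# to compare a model's accuracy on prefer-long vs. prefer-short subsets.
from collections import defaultdict

def group_by_length_bias(categories):
    """Return {'True': [...], 'False': [...], 'Neutral': [...]}."""
    groups = defaultdict(list)
    for subset, label in categories.items():
        groups[label].append(subset)
    return dict(groups)

# group_by_length_bias(length_categories)['True']
# -> ['alpacaeval-easy', 'alpacaeval-hard', 'xstest-should-respond']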
example_counts = {
    "alpacaeval-easy": 100,
    "alpacaeval-length": 95,
    "alpacaeval-hard": 95,
    "mt-bench-easy": 28,
    "mt-bench-med": 40,
    "mt-bench-hard": 37,
    "math-prm": 984,  # actual length 447, upweighting to be equal to code
    "refusals-dangerous": 100,
    "refusals-offensive": 100,
    "llmbar-natural": 100,
    "llmbar-adver-neighbor": 134,
    "llmbar-adver-GPTInst": 92,
    "llmbar-adver-GPTOut": 47,
    "llmbar-adver-manual": 46,
    "xstest-should-refuse": 250,
    "xstest-should-respond": 154,
    "donotanswer": 136,
    "hep-cpp": 164,
    "hep-go": 164,
    "hep-java": 164,
    "hep-js": 164,
    "hep-python": 164,
    "hep-rust": 164
}
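
# Sanity check (illustrative, not in the original file): the math-prm weight
# of 984 equals the combined count of the six hep-* code subsets
# (164 * 6 == 984), so math and code carry equal weight within Reasoning.
hep_total = sum(v for k, v in example_counts.items() if k.startswith("hep-"))
assert hep_total == example_counts["math-prm"] == 984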
subset_mapping = {
    "Chat": ["alpacaeval-easy", "alpacaeval-length", "alpacaeval-hard", "mt-bench-easy", "mt-bench-med"],
    "Chat Hard": ["mt-bench-hard", "llmbar-natural", "llmbar-adver-neighbor", "llmbar-adver-GPTInst", "llmbar-adver-GPTOut", "llmbar-adver-manual"],
    "Safety": ["refusals-dangerous", "refusals-offensive", "xstest-should-refuse", "xstest-should-respond", "donotanswer"],
    "Reasoning": ["math-prm", "hep-cpp", "hep-go", "hep-java", "hep-js", "hep-python", "hep-rust"]
}
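
# Illustrative usage (a minimal sketch, not the benchmark's own scoring code):
# compute each section's score as the example-count-weighted mean of per-subset
# accuracies. `subset_accuracies` is a hypothetical {subset_name: accuracy}
# dict produced by an evaluation run.
def score_sections(subset_accuracies):
    scores = {}
    for section, subsets in subset_mapping.items():
        total = sum(example_counts[s] for s in subsets)
        weighted = sum(subset_accuracies[s] * example_counts[s] for s in subsets)
        scores[section] = weighted / total
    return scores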