text
stringlengths 0
15.3k
|
---|
def higher_is_better(self):
    """Declare that a larger value is better for every metric on this task.

    Strips the 'metrics.' prefix from each entry of ``self.metrics`` and maps
    the bare name to ``True``.
    """
    preferences = {}
    for metric in self.metrics:
        preferences[metric.replace('metrics.', '')] = True
    return preferences
# File: lm-evaluation-harness-main/lm_eval/tasks/webqs/utils.py |
from typing import Dict, List |
def doc_to_choice(doc: Dict) -> List[str]:
    """Build the choice list for a WebQS doc.

    Aliases that merely extend a shorter alias (share it as a prefix) are
    collapsed by ``_remove_prefixes``; each survivor becomes one choice.
    """
    deduplicated = _remove_prefixes(doc['answers'])
    return deduplicated
def doc_to_target(doc: Dict) -> List[int]:
    """Return indices 0..n-1 for the n de-prefixed aliases.

    Every alias surviving ``_remove_prefixes`` is treated as a gold target,
    so the result indexes the full choice list produced by ``doc_to_choice``.
    """
    gold_aliases = _remove_prefixes(doc['answers'])
    return [position for position in range(len(gold_aliases))]
def _remove_prefixes(aliases): |
aliases.sort() |
ret = [aliases[0]] |
for alias in aliases[1:]: |
if not alias.startswith(ret[-1]): |
ret.append(alias) |
return ret |
# File: lm-evaluation-harness-main/lm_eval/tasks/wikitext/preprocess_wikitext.py |
import re |
def wikitext_detokenizer(doc): |
string = doc['page'] |
string = string.replace("s '", "s'") |
string = re.sub("/' [0-9]/", "/'[0-9]/", string) |
string = string.replace(' @-@ ', '-') |
string = string.replace(' @,@ ', ',') |
string = string.replace(' @.@ ', '.') |
string = string.replace(' : ', ': ') |
string = string.replace(' ; ', '; ') |
string = string.replace(' . ', '. ') |
string = string.replace(' ! ', '! ') |
string = string.replace(' ? ', '? ') |
string = string.replace(' , ', ', ') |
string = re.sub('\\(\\s*([^\\)]*?)\\s*\\)', '(\\1)', string) |
string = re.sub('\\[\\s*([^\\]]*?)\\s*\\]', '[\\1]', string) |
string = re.sub('{\\s*([^}]*?)\\s*}', '{\\1}', string) |
string = re.sub('\\"\\s*([^\\"]*?)\\s*\\"', '"\\1"', string) |
string = re.sub("'\\s*([^']*?)\\s*'", "'\\1'", string) |
string = string.replace('= = = =', '====') |
string = string.replace('= = =', '===') |
string = string.replace('= =', '==') |
string = string.replace(' ' + chr(176) + ' ', chr(176)) |
string = string.replace(' \n', '\n') |
string = string.replace('\n ', '\n') |
string = string.replace(' N ', ' 1 ') |
string = string.replace(" 's", "'s") |
return string |
def process_results(doc, results):
    """Package a single loglikelihood into perplexity metric tuples.

    :param doc: dict with the raw text under 'page'
    :param results: single-element sequence holding the loglikelihood
    :return: dict of (loglikelihood, denominator) pairs for word perplexity,
             byte perplexity, and bits-per-byte
    """
    # List-unpacking keeps the original's "exactly one result" contract.
    [loglikelihood] = results
    page = doc['page']
    word_count = len(re.split(r'\s+', page))
    byte_count = len(page.encode('utf-8'))
    return {
        'word_perplexity': (loglikelihood, word_count),
        'byte_perplexity': (loglikelihood, byte_count),
        'bits_per_byte': (loglikelihood, byte_count),
    }
# File: lm-evaluation-harness-main/lm_eval/tasks/winogrande/preprocess_winogrande.py |
def doc_to_text(doc):
    """Convert the gold answer label ('1' or '2') to a 0-based option index.

    A dict lookup is kept so any other label raises KeyError, as before.
    """
    label_to_index = {'1': 0, '2': 1}
    return label_to_index[doc['answer']]
def doc_to_target(doc):
    """Return the sentence text that follows the '_' blank, stripped of
    surrounding whitespace."""
    sentence = doc['sentence']
    tail_start = sentence.index('_') + 1
    return sentence[tail_start:].strip()
def doc_to_choice(doc):
    """Build both candidate texts: the sentence up to the '_' blank, with each
    option substituted for the blank's position."""
    sentence = doc['sentence']
    prefix = sentence[:sentence.index('_')]
    return [prefix + doc['option1'], prefix + doc['option2']]
# File: lm-evaluation-harness-main/lm_eval/tasks/wsc273/utils.py |
# Capitalized forms a pronoun/determiner may take when it opens an option;
# __normalize_option lower-cases an option's first word only when it appears
# in this list and the option is not at the start of a sentence.
upper_pronouns = ['A', 'An', 'The', 'She', 'He', 'It', 'They', 'My', 'His', 'Her', 'Their']
def process_doc(dataset):
    """Normalize every doc in a WSC273 dataset.

    Collapses double spaces in the passage text and normalizes both answer
    options (possessive suffix / leading-capital handling) via
    ``__normalize_option``.

    :param dataset: mapped object exposing ``.map`` (e.g. a HF Dataset)
    :return: the mapped dataset
    """
    def process_fn(doc):
        # Collapse double spaces. The original line replaced a single space
        # with a single space (a no-op); given the surrounding whitespace
        # mangling, '  ' -> ' ' is the evident intent — TODO confirm upstream.
        doc['text'] = doc['text'].replace('  ', ' ')
        doc['options'][0] = __normalize_option(doc, doc['options'][0])
        doc['options'][1] = __normalize_option(doc, doc['options'][1])
        return doc
    return dataset.map(process_fn)
def __normalize_option(doc, option):
    """Adjust an answer option to fit grammatically at the pronoun's slot.

    Appends "'s" when the pronoun being replaced is possessive, and
    lower-cases the option's first word when the slot is mid-sentence.
    """
    possessive_forms = ['my', 'his', 'her', 'our', 'their']
    if doc['pronoun'].lower() in possessive_forms:
        option += "'s"

    first_word = option.split()[0]
    # The character two positions before the pronoun is '.' exactly when the
    # pronoun opens a sentence.
    sentence_initial = doc['text'][doc['pronoun_loc'] - 2] == '.'
    if sentence_initial or first_word not in upper_pronouns:
        return option
    # NOTE: str.replace lowers every occurrence of first_word in the option,
    # not just the leading one — preserved from the original.
    return option.replace(first_word, first_word.lower())
# File: lm-evaluation-harness-main/lm_eval/tasks/xcopa/utils.py |
from functools import partial |
def convert_choice(choice):
    """Lower-case the first character of *choice*, leaving the rest intact.

    Fix: an empty string is returned unchanged instead of raising IndexError
    on ``choice[0]``; behavior for non-empty inputs is identical.

    :param choice: candidate answer string
    :return: string with its first character lower-cased
    """
    if not choice:
        return choice
    return choice[0].lower() + choice[1:]