# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here.""" | |
import evaluate | |
import datasets | |
import numpy as np | |
from transformers import AutoModelForSequenceClassification, AutoTokenizer | |
import getpass | |
import pdb | |
import os | |
import torch | |
from rouge_score import scoring | |
from contextlib import contextmanager | |

# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""

_DESCRIPTION = """\
Local coherence score from a classifier trained on the sentence-shuffle task,
computed over sliding windows of 3 sentences.
"""

_KWARGS_DESCRIPTION = """
Scores the local coherence of each input document with a shuffle-task classifier.
Args:
    predictions: list of documents to score. Each document should be a single
        string with its sentences separated by newline ("\\n") characters.
    dataset: which fine-tuned checkpoint to use (default "arxiv"); the model
        "ronaldahmed/ccl_win-<dataset>" is loaded from the Hugging Face Hub.
    batch_size: batch size used when running the classifier (default 16).
    device: "cuda", "gpu", or "cpu"; defaults to CUDA when available.
    use_aggregator: if True (default), bootstrap-aggregate the per-document
        scores and return the mid estimate; if False, return the list of
        per-document scores.
Returns:
    loc_coh_ccl: local coherence score in [0, 1]; documents with a single
        sentence receive 0.0.
Examples:
    >>> coherence = evaluate.load("ccl_win")  # path to this module script
    >>> results = coherence.compute(
    ...     predictions=["First sentence.\\nSecond sentence.\\nThird sentence.\\nFourth sentence."]
    ... )
    >>> list(results.keys())
    ['loc_coh_ccl']
"""

WINDOW_SIZE = 3


@contextmanager
def filter_logging_context():
    # Temporarily hide the expected "This IS expected if you are initializing"
    # warning emitted by transformers when the classifier checkpoint is loaded.
    def filter_log(record):
        return "This IS expected if you are initializing" not in record.msg

    logger = datasets.utils.logging.get_logger("transformers.modeling_utils")
    logger.addFilter(filter_log)
    try:
        yield
    finally:
        logger.removeFilter(filter_log)


class Scorer:
    """Wraps the "ronaldahmed/ccl_win-<model_type>" shuffle classifier."""

    def __init__(
            self,
            model_type=None,
            batch_size=64,
            device=None,
            use_fast_tokenizer=False):
        if device is not None:
            # assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        self.device = device
        self.model_type = model_type
        self.batch_size = batch_size
        self._tokenizer = AutoTokenizer.from_pretrained("roberta-large")
        self._model = AutoModelForSequenceClassification.from_pretrained(f"ronaldahmed/ccl_win-{model_type}")
        self._model.to(device)
        self._model.eval()

    @property
    def hash(self):
        # A property (rather than a method) so it can be compared directly
        # against the dataset name used as a cache key in ccl_win._compute.
        return self.model_type

    def preprocess_adjacent_window(self, preds):
        """Split each document into overlapping windows of WINDOW_SIZE sentences."""
        pred_list = []
        lens = []
        for pred in preds:
            sents = pred.split("\n")
            ns = len(sents)
            if ns <= WINDOW_SIZE:
                # Short documents are scored as a single window.
                pred_list.append(pred)
                lens.append(1)
            else:
                llen = 0
                for i in range(0, ns - WINDOW_SIZE + 1):
                    window = "\n".join(sents[i:i + WINDOW_SIZE])
                    pred_list.append(window)
                    llen += 1
                lens.append(llen)
        return pred_list, lens
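
    # Illustration (added note, not original code): a 5-sentence document
    # "s0\ns1\ns2\ns3\ns4" produces the 3 overlapping windows
    # ["s0\ns1\ns2", "s1\ns2\ns3", "s2\ns3\ns4"], so lens == [3] for it.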

    def score(self, predictions):
        """Return one local-coherence score per input document."""
        sent_lens = [len(x.split("\n")) for x in predictions]
        pred_list, len_by_sample = self.preprocess_adjacent_window(predictions)
        scores = []
        n_preds = len(pred_list)
        with torch.no_grad():
            for b in range(0, n_preds, self.batch_size):
                strides = [x.lower() for x in pred_list[b:b + self.batch_size]]
                tinput = self._tokenizer(strides, padding=True, truncation=True, max_length=512, return_tensors="pt")
                tinput = {k: v.to(self.device) for k, v in tinput.items()}
                output = self._model(**tinput)
                probs = torch.softmax(output.logits, dim=-1).detach().cpu().numpy()
                # Keep the probability assigned to label index 0 for each window.
                scores.extend(probs[:, 0].tolist())
        results = []
        offset = 0
        for i, _len in enumerate(len_by_sample):
            # Average the window probabilities per document; single-sentence documents get 0.0.
            score = float(np.mean(scores[offset:offset + _len])) if sent_lens[i] > 1 else 0.0
            results.append(score)
            offset += _len
        return results


class ccl_win(evaluate.Measurement):
    """Local coherence measurement based on a 3-sentence-window shuffle classifier."""

    def _info(self):
        return evaluate.MeasurementInfo(
            # This is the description that will appear on the modules page.
            module_type="measurement",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference
            features=datasets.Features({
                'predictions': datasets.Value('string'),
            }),
            # Homepage of the module for documentation
            homepage="http://module.homepage",
            # Additional links to the codebase or references
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            reference_urls=["http://path.to.reference.url/new_module"]
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores"""
        # Nothing to do here: the classifier checkpoint is fetched lazily when
        # Scorer is first instantiated in _compute.
        pass

    def _compute(self, predictions, dataset="arxiv", batch_size: int = 16, device=None, use_aggregator=True):
        """Returns the scores"""
        hashcode = dataset
        with filter_logging_context():
            # Reuse the cached scorer unless a different dataset/checkpoint is requested.
            if not hasattr(self, "cached_scorer") or self.cached_scorer.hash != hashcode:
                self.cached_scorer = Scorer(
                    model_type=dataset,
                    batch_size=batch_size,
                    device=device,
                )
        results = self.cached_scorer.score(predictions)
        outres = {}
        if use_aggregator:
            # Bootstrap-aggregate the per-document scores and report the mid estimate.
            np.random.seed(42)
            aggregator = scoring.BootstrapAggregator()
            for score in results:
                aggregator.add_scores({"loc_coh_ccl": score})
            res = aggregator.aggregate()
            for k in res:
                outres[k] = res[k].mid
        else:
            outres = {"loc_coh_ccl": results}
        return outres
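

# Minimal usage sketch (an illustrative addition, not part of the original module):
# instantiate the measurement directly and score newline-separated documents.
# Running this downloads the "ronaldahmed/ccl_win-arxiv" checkpoint from the
# Hugging Face Hub; in practice the module is normally loaded via
# evaluate.load(<path to this script>).
if __name__ == "__main__":
    docs = [
        "First sentence.\nSecond sentence.\nThird sentence.\nFourth sentence.",
        "Another short document.\nWith only two sentences.",
    ]
    measurement = ccl_win()
    result = measurement.compute(predictions=docs, dataset="arxiv", batch_size=16)
    print(result)  # e.g. {'loc_coh_ccl': <float in [0, 1]>}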