# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here.""" | |
import getpass
import os

import datasets
import evaluate
import numpy as np
import torch
from rouge_score import scoring
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""
_DESCRIPTION = """\
Local coherence score computed with a classifier trained on the sentence-shuffle task, using a window of 3 sentences.
"""
_KWARGS_DESCRIPTION = """
Scores the local coherence of each input text with a classifier trained on the shuffle task.
Args:
    predictions: list of texts to score. Each text should be a string with
        sentences separated by newline characters.
    dataset: name of the dataset the classifier was fine-tuned on; used to
        locate the model checkpoint (default: "arxiv").
    batch_size: batch size used for classifier inference (default: 16).
    device: "cpu", "cuda", or "gpu"; defaults to "cuda" when available.
    use_aggregator: if True (default), return bootstrap-aggregated scores;
        otherwise return one score per input text.
Returns:
    loc_coh_ccl: mean classifier probability over all adjacent windows of
        WINDOW_SIZE sentences (0. for single-sentence inputs).
Examples:
    >>> module = evaluate.load("ccl_win")
    >>> results = module.compute(predictions=["First sentence.\\nSecond sentence.\\nThird sentence.\\nFourth sentence."])
    >>> print(results)
    {'loc_coh_ccl': ...}
"""
WINDOW_SIZE = 3  # number of adjacent sentences scored together
class ccl_win(evaluate.Measurement):
    """Local coherence measurement backed by a shuffle-task classifier over adjacent sentence windows."""
    def _info(self):
        # TODO: Specifies the evaluate.EvaluationModuleInfo object
        return evaluate.MeasurementInfo(
            # This is the description that will appear on the modules page.
            module_type="measurement",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference
            features=datasets.Features({
                "predictions": datasets.Value("string"),
            }),
            # Homepage of the module for documentation
            homepage="http://module.homepage",
            # Additional links to the codebase or references
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            reference_urls=["http://path.to.reference.url/new_module"],
        )
    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores"""
        # TODO: Download external resources if needed
        pass
    def preprocess_adjacent_window(self, preds):
        """Split each prediction into overlapping windows of WINDOW_SIZE sentences.

        Returns the flat list of windows and, per prediction, how many windows
        it contributed.
        """
        pred_list = []
        lens = []
        for pred in preds:
            sents = pred.split("\n")
            ns = len(sents)
            if ns <= WINDOW_SIZE:
                # Short texts are scored as a single window.
                pred_list.append(pred)
                lens.append(1)
            else:
                llen = 0
                for i in range(0, ns - WINDOW_SIZE + 1):
                    window = "\n".join(sents[i:i + WINDOW_SIZE])
                    pred_list.append(window)
                    llen += 1
                lens.append(llen)
        return pred_list, lens
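
    # A worked example of the windowing above (hypothetical input): with
    # WINDOW_SIZE = 3, a 5-sentence text "s0\ns1\ns2\ns3\ns4" produces the
    # windows "s0\ns1\ns2", "s1\ns2\ns3", and "s2\ns3\ns4", so pred_list
    # grows by three entries and lens records 3 for that sample.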
    def _compute(self, predictions, dataset="arxiv", batch_size: int = 16, device=None, use_aggregator=True):
        """Returns the scores"""
        # Machine-specific locations for the HF cache and the fine-tuned checkpoints.
        MODEL_CACHE_DIR = "/home/rcardena/.cache/huggingface/"
        BASEDIR = "/bask/projects/j/jlxi8926-auto-sum/rcardenas/tools/ccl_win"
        if getpass.getuser() == "s1987051":
            MODEL_CACHE_DIR = "/disk/ocean/rcardenas/tools/huggingface/"
        elif getpass.getuser() == "rcardena":
            MODEL_CACHE_DIR = "/gfs/team/nlp/users/rcardena/tools/huggingface/"
        elif getpass.getuser() == "gvhr8913":
            MODEL_CACHE_DIR = "/bask/projects/j/jlxi8926-auto-sum/rcardenas/cache"

        if device is not None:
            assert device in ("gpu", "cpu", "cuda"), "device should be gpu, cpu, or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        results = []
        sent_lens = [len(x.split("\n")) for x in predictions]
        aggregator = None
        if use_aggregator:
            np.random.seed(42)  # fixed seed so bootstrap resampling is reproducible
            aggregator = scoring.BootstrapAggregator()

        # cache_dir applies the per-user MODEL_CACHE_DIR resolved above.
        tokenizer = AutoTokenizer.from_pretrained("roberta-large", cache_dir=MODEL_CACHE_DIR)
        model = AutoModelForSequenceClassification.from_pretrained(os.path.join(BASEDIR, dataset))
        model.to(device)
        model.eval()
        pred_list, len_by_sample = self.preprocess_adjacent_window(predictions)
        scores = []
        n_preds = len(pred_list)
        with torch.no_grad():
            for b in range(0, n_preds, batch_size):
                batch = [x.lower() for x in pred_list[b:b + batch_size]]
                tinput = tokenizer(batch, padding=True, truncation=True, max_length=512, return_tensors="pt")
                tinput = {k: v.to(device) for k, v in tinput.items()}
                output = model(**tinput)
                probs = torch.softmax(output.logits, dim=-1).cpu().numpy()
                # The probability assigned to label 0 is used as the per-window score.
                scores.extend(probs[:, 0].tolist())
        # Average the window scores back into one score per input text.
        offset = 0
        for i, _len in enumerate(len_by_sample):
            score = float(np.mean(scores[offset:offset + _len])) if sent_lens[i] > 1 else 0.
            if use_aggregator:
                aggregator.add_scores({"loc_coh_ccl": score})
            else:
                results.append(score)
            offset += _len

        outres = {}
        if use_aggregator:
            res = aggregator.aggregate()
            for k in res:
                outres[k] = res[k].mid  # mid of the bootstrap confidence interval
        else:
            outres = {"loc_coh_ccl": results}
        return outres
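
# A minimal usage sketch (an assumption, not part of the original script): it
# presumes a fine-tuned checkpoint exists under BASEDIR/<dataset> on the
# current machine, and instantiates the module class directly instead of going
# through evaluate.load.
if __name__ == "__main__":
    module = ccl_win()
    texts = [
        "The model scores local coherence.\n"
        "It was trained on a sentence-shuffle task.\n"
        "Each window spans three adjacent sentences.\n"
        "Window probabilities are averaged per text."
    ]
    # use_aggregator=False returns one raw score per input text.
    print(module.compute(predictions=texts, dataset="arxiv", use_aggregator=False))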