Datasets:

| inputs (string, lengths 312–52k) | targets (string, lengths 1–3.1k, ⌀) | block_type (11 classes) | scenario (7 classes) |
|---|---|---|---|
<filename>UHGEval/uhgeval/dataset/truthfulqa.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/dataset/xinhua.py
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
if os.path.isfile(path):
with open(path, encoding='utf-8') as f:
self.data = json.load(f)
if shuffle:
random.seed(seed)
random.shuffle(self.data)
# UHGEval/uhgeval/evaluator/base.py
def read_output(self) -> dict:
with open(self.output_path, encoding='utf-8') as f:
return json.load(f)
# UHGEval/uhgeval/evaluator/base.py
def save_output(self, output: dict) -> None:
"""Save evaluation results."""
with open(self.output_path, 'w', encoding='utf-8') as f:
json.dump(output, f, ensure_ascii=False, indent=4)
"""
# @Author : YeZhaohui Wang
# @Email : wyzh0912@126.com
import csv
import json
import os
import random
from uhgeval.dataset.base import BaseDataset
class TruthfunQAGeneration(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
if os.path.isfile(path):
with open(path, 'r', encoding='utf-8-sig') as file:
csv_reader = csv.DictReader(file)
id = 1
for row in csv_reader:
row['id'] = id
id += 1
self.data.append(row)
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
return self.data[:]
class TruthfunQAMC1(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
<fim_suffix>
id = 1
if os.path.isfile(path):
with open(path, encoding='utf-8') as f:
self.data = json.load(f)
for row in self.data:
row['id'] = id
id += 1
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
return self.data[:]
class TruthfunQAMC2(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
id = 1
if os.path.isfile(path):
with open(path, encoding='utf-8') as f:
self.data = json.load(f)
for row in self.data:
row['id'] = id
id += 1
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
return self.data[:]
<fim_middle>self.data = [] | self.data = [] | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
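The three TruthfulQA wrappers in this row share the same minimal interface (`__init__`, `__len__`, `__getitem__`, `load`). A minimal usage sketch for the generation split, assuming a locally downloaded CSV; the path below is a placeholder, not taken from the repository:

```python
# Sketch only: exercises the TruthfunQAGeneration class exactly as defined above.
# 'data/TruthfulQA.csv' is a placeholder path for a local copy of the CSV file.
from uhgeval.dataset.truthfulqa import TruthfunQAGeneration  # class name as spelled in the snippet

dataset = TruthfunQAGeneration('data/TruthfulQA.csv', shuffle=True, seed=22)

print(len(dataset))     # number of CSV rows read by csv.DictReader
print(dataset[0])       # one row as a dict, with the integer 'id' the loader adds
rows = dataset.load()   # shallow copy of every row
```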
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
<fim_suffix>
<fim_middle>return accuracy, precision, recall, f1 | return accuracy, precision, recall, f1 | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
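For reference, a small worked call to the `classifications` helper whose return statement is completed in this row; the labels are made up purely to exercise each branch:

```python
# Made-up labels: 1 marks a hallucinated keyword, 0 a non-hallucinated one.
references  = [1, 1, 0, 0]   # ground truth
predictions = [1, 0, 1, 0]   # model answers

accuracy, precision, recall, f1 = classifications(predictions, references)
# TP=1, FP=1, FN=1  ->  precision = recall = f1 = 0.5; 2 of 4 labels match -> accuracy = 0.5
print(accuracy, precision, recall, f1)
```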
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
<fim_suffix>
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>f1 = 2 * (precision * recall) / (precision + recall) | f1 = 2 * (precision * recall) / (precision + recall) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
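Written out, the `f1` assignment completed in this row is the usual harmonic mean of the two quantities computed just above it:

```latex
\mathrm{precision} = \frac{TP}{TP + FP},\qquad
\mathrm{recall} = \frac{TP}{TP + FN},\qquad
F_1 = \frac{2\,\mathrm{precision}\cdot\mathrm{recall}}{\mathrm{precision} + \mathrm{recall}}
```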
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
# UHGEval/uhgeval/metric/tmp_common.py
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
# UHGEval/uhgeval/llm/base.py
def safe_request(self, query: str) -> str:
"""Safely make a request to the language model, handling exceptions."""
try:
response = self.request(query)
except Exception as e:
logger.warning(repr(e))
response = ''
return response
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
<fim_suffix>
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>logger.warning(repr(e)) | logger.warning(repr(e)) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
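The `except` branch completed in this row only logs the exception and never re-raises or returns, so any decorated metric that fails comes back as `None`. A self-contained sketch of that behavior, using a throwaway `divide` function that is not part of the repository:

```python
from loguru import logger

def catch_all_exceptions(func):
    def wrapper(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
            return result
        except Exception as e:
            logger.warning(repr(e))   # logged, then falls through -> wrapper returns None
    return wrapper

@catch_all_exceptions
def divide(a, b):       # throwaway example, not a repository function
    return a / b

print(divide(1, 2))     # 0.5
print(divide(1, 0))     # None -- the ZeroDivisionError is logged, not raised
```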
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
<fim_suffix>
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) | false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
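One subtlety in the `kw_precision` snippet repeated in this row: in `return 0, [], [] if with_kw_list else 0`, the conditional expression binds only to the final element, so a 3-tuple is returned even when `with_kw_list` is `False` (the final `return` behaves the same way). A small standalone demonstration of the precedence, independent of the repository code:

```python
with_kw_list = False

# The conditional expression applies only to the last element of the tuple.
value = 0, [], [] if with_kw_list else 0
print(value)                              # (0, [], 0) -- still a 3-tuple

# Parenthesizing the tuple switches the whole return value on the flag.
value = (0, [], []) if with_kw_list else 0
print(value)                              # 0
```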
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
<fim_suffix>
return accuracy, precision, recall, f1
<fim_middle>accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 | accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
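The `with_penalty` branch of `bleu4_score` in this row layers its own brevity penalty on top of NLTK's sentence-level BLEU. Isolated into a helper (`brevity_penalty` below is illustrative only, not a repository function), the scaling it applies is:

```python
import math

def brevity_penalty(reference_length: int, continuation_length: int) -> float:
    # Mirrors the with_penalty branch above: 1.0 when the continuation is longer
    # than the reference (and exp(0) = 1.0 at equal length); otherwise the score
    # is shrunk by exp(1 - ref_len / cont_len) < 1.
    if continuation_length > reference_length:
        return 1.0
    return math.exp(1 - (reference_length / continuation_length))

print(brevity_penalty(20, 25))             # 1.0
print(round(brevity_penalty(20, 10), 4))   # exp(-1) ≈ 0.3679
```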
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
<fim_suffix>
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) | true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
<fim_suffix>
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 | precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
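A small worked example of the arithmetic implemented by `classifications` above, using toy values chosen only for illustration (per the convention in the quoted evaluator, 1 marks a hallucination and 0 a non-hallucination):

# Toy inputs: two of the four predictions agree with the references.
references  = [1, 1, 0, 0]
predictions = [1, 0, 1, 0]

tp = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 1)  # 1
fp = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 1)  # 1
fn = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 0)  # 1

precision = tp / (tp + fp)                                   # 0.5
recall = tp / (tp + fn)                                      # 0.5
f1 = 2 * precision * recall / (precision + recall)           # 0.5
accuracy = sum(r == p for r, p in zip(references, predictions)) / len(predictions)  # 0.5
print(precision, recall, f1, accuracy)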
<filename>UHGEval/uhgeval/dataset/truthfulqa.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/dataset/xinhua.py
def load(self) -> list[dict]:
return self.data[:]
# UHGEval/uhgeval/dataset/base.py
def load(self) -> list[dict]:
...
# UHGEval/uhgeval/evaluator/base.py
def read_output(self) -> dict:
with open(self.output_path, encoding='utf-8') as f:
return json.load(f)
"""
# @Author : YeZhaohui Wang
# @Email : wyzh0912@126.com
import csv
import json
import os
import random
from uhgeval.dataset.base import BaseDataset
class TruthfunQAGeneration(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
if os.path.isfile(path):
with open(path, 'r', encoding='utf-8-sig') as file:
csv_reader = csv.DictReader(file)
id = 1
for row in csv_reader:
row['id'] = id
id += 1
self.data.append(row)
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
return self.data[:]
class TruthfunQAMC1(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
id = 1
if os.path.isfile(path):
with open(path, encoding='utf-8') as f:
self.data = json.load(f)
for row in self.data:
row['id'] = id
id += 1
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
<fim_suffix>
class TruthfunQAMC2(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
id = 1
if os.path.isfile(path):
with open(path, encoding='utf-8') as f:
self.data = json.load(f)
for row in self.data:
row['id'] = id
id += 1
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
return self.data[:]
<fim_middle>return self.data[:] | return self.data[:] | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
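A hypothetical usage sketch for the dataset classes above; the module path follows the filename shown, but the data file paths are placeholders (not taken from this dataset) and the files are assumed to exist:

from uhgeval.dataset.truthfulqa import TruthfunQAGeneration, TruthfunQAMC1

gen = TruthfunQAGeneration('data/TruthfulQA.csv', shuffle=True, seed=22)  # placeholder path
mc1 = TruthfunQAMC1('data/mc_task.json')                                  # placeholder path

print(len(gen))          # number of CSV rows read (0 if the file is missing)
first = mc1[0]           # __getitem__ returns a dict; 'id' was assigned in file order before shuffling
subset = mc1.load()[:3]  # load() returns a shallow copy of the internal list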
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
# UHGEval/uhgeval/metric/tmp_common.py
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
# UHGEval/uhgeval/llm/base.py
def safe_request(self, query: str) -> str:
"""Safely make a request to the language model, handling exceptions."""
try:
response = self.request(query)
except Exception as e:
logger.warning(repr(e))
response = ''
return response
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
<fim_suffix>
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>return result | return result | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
<fim_suffix>
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall) | if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
# UHGEval/uhgeval/metric/tmp_common.py
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
# UHGEval/uhgeval/llm/base.py
def safe_request(self, query: str) -> str:
"""Safely make a request to the language model, handling exceptions."""
try:
response = self.request(query)
except Exception as e:
logger.warning(repr(e))
response = ''
return response
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
<fim_suffix>
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>try:
result = func(*args, **kwargs)
return result | try:
result = func(*args, **kwargs)
return result | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
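An illustrative sketch of how the `catch_all_exceptions` decorator above behaves: on success the wrapped function's result is returned, while any exception is only logged via loguru and the wrapper falls through to an implicit None. The decorator is restated here (slightly condensed) so the snippet runs on its own:

from loguru import logger

def catch_all_exceptions(func):
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.warning(repr(e))
    return wrapper

@catch_all_exceptions
def inverse(x: float) -> float:
    return 1.0 / x

print(inverse(4.0))  # 0.25
print(inverse(0.0))  # logs ZeroDivisionError(...) and prints None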
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
# UHGEval/uhgeval/metric/tmp_common.py
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
# UHGEval/uhgeval/llm/base.py
def safe_request(self, query: str) -> str:
"""Safely make a request to the language model, handling exceptions."""
try:
response = self.request(query)
except Exception as e:
logger.warning(repr(e))
response = ''
return response
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
<fim_suffix>
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>except Exception as e:
logger.warning(repr(e)) | except Exception as e:
logger.warning(repr(e)) | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
<fim_suffix>
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
""" | """
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/structures/image_list.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/projects/UniRef/uniref/models/segment_anything/utils/transforms.py
def apply_boxes_torch(
self, boxes: torch.Tensor, original_size: Tuple[int, ...]
) -> torch.Tensor:
"""
Expects a torch tensor with shape Bx4. Requires the original image
size in (H, W) format.
"""
boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
return boxes.reshape(-1, 4)
# UniRef/detectron2/structures/rotated_boxes.py
def __getitem__(self, item) -> "RotatedBoxes":
"""
Returns:
RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.
The following usage are allowed:
1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned RotatedBoxes might share storage with this RotatedBoxes,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return RotatedBoxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format(
item
)
return RotatedBoxes(b)
# UniRef/detectron2/utils/events.py
def put_image(self, img_name, img_tensor):
"""
Add an `img_tensor` associated with `img_name`, to be shown on
tensorboard.
Args:
img_name (str): The name of the image to put into tensorboard.
img_tensor (torch.Tensor or numpy.array): An `uint8` or `float`
Tensor of shape `[channel, height, width]` where `channel` is
3. The image format should be RGB. The elements in img_tensor
can either have values in [0, 1] (float32) or [0, 255] (uint8).
The `img_tensor` will be visualized in tensorboard.
"""
self._vis_data.append((img_name, img_tensor, self._iter))
"""
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import division
from typing import Any, List, Tuple
import torch
from torch import device
from torch.nn import functional as F
from detectron2.layers.wrappers import shapes_to_tensor
class ImageList(object):
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size.
The original sizes of each image is stored in `image_sizes`.
Attributes:
image_sizes (list[tuple[int, int]]): each tuple is (h, w).
During tracing, it becomes list[Tensor] instead.
"""
def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):
"""
Arguments:
tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1
image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can
be smaller than (H, W) due to padding.
"""
self.tensor = tensor
self.image_sizes = image_sizes
def __len__(self) -> int:
return len(self.image_sizes)
def __getitem__(self, idx) -> torch.Tensor:
<fim_suffix>
size = self.image_sizes[idx]
return self.tensor[idx, ..., : size[0], : size[1]]
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "ImageList":
cast_tensor = self.tensor.to(*args, **kwargs)
return ImageList(cast_tensor, self.image_sizes)
@property
def device(self) -> device:
return self.tensor.device
@staticmethod
def from_tensors(
tensors: List[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0
) -> "ImageList":
"""
Args:
tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or
(C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded
to the same shape with `pad_value`.
size_divisibility (int): If `size_divisibility > 0`, add padding to ensure
the common height and width is divisible by `size_divisibility`.
This depends on the model and many models need a divisibility of 32.
pad_value (float): value to pad
Returns:
an `ImageList`.
"""
assert len(tensors) > 0
assert isinstance(tensors, (tuple, list))
for t in tensors:
assert isinstance(t, torch.Tensor), type(t)
assert t.shape[:-2] == tensors[0].shape[:-2], t.shape
image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]
image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes]
max_size = torch.stack(image_sizes_tensor).max(0).values
if size_divisibility > 1:
stride = size_divisibility
# the last two dims are H,W, both subject to divisibility requirement
max_size = (max_size + (stride - 1)).div(stride, rounding_mode="floor") * stride
# handle weirdness of scripting and tracing ...
if torch.jit.is_scripting():
max_size: List[int] = max_size.to(dtype=torch.long).tolist()
else:
if torch.jit.is_tracing():
image_sizes = image_sizes_tensor
if len(tensors) == 1:
# This seems slightly (2%) faster.
# TODO: check whether it's faster for multiple images as well
image_size = image_sizes[0]
padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]
batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)
else:
# max_size can be a tensor in tracing mode, therefore convert to list
batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)
batched_imgs = tensors[0].new_full(batch_shape, pad_value)
for img, pad_img in zip(tensors, batched_imgs):
pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img)
return ImageList(batched_imgs.contiguous(), image_sizes)
<fim_middle>"""
Access the individual image in its original size.
Args:
idx: int or slice
Returns:
Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
""" | """
Access the individual image in its original size.
Args:
idx: int or slice
Returns:
Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/solver/build.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/engine/hooks.py
def load_state_dict(self, state_dict):
if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler):
logger = logging.getLogger(__name__)
logger.info("Loading scheduler from state_dict ...")
self.scheduler.load_state_dict(state_dict)
# UniRef/projects/UniRef/uniref/data/coco_dataset_mapper.py
def build_transform_gen_fss(cfg, is_train): # for fss task, low-resolution
"""
Create a list of :class:`TransformGen` from config.
Returns:
list[TransformGen]
"""
if is_train:
min_size = [320, 352, 392, 416, 448, 480, 512, 544, 576, 608, 640]
max_size = 768
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = 480
max_size = 768
sample_style = "choice"
if sample_style == "range":
assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
logger = logging.getLogger(__name__)
tfm_gens = []
if is_train:
tfm_gens.append(T.RandomFlip())
tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
if is_train:
logger.info("TransformGens used in training: " + str(tfm_gens))
return tfm_gens
# UniRef/detectron2/solver/lr_scheduler.py
def __init__(
self,
optimizer: torch.optim.Optimizer,
multiplier: ParamScheduler,
max_iter: int,
last_iter: int = -1,
):
"""
Args:
optimizer, last_iter: See ``torch.optim.lr_scheduler._LRScheduler``.
``last_iter`` is the same as ``last_epoch``.
multiplier: a fvcore ParamScheduler that defines the multiplier on
every LR of the optimizer
max_iter: the total number of training iterations
"""
if not isinstance(multiplier, ParamScheduler):
raise ValueError(
"_LRMultiplier(multiplier=) must be an instance of fvcore "
f"ParamScheduler. Got {multiplier} instead."
)
self._multiplier = multiplier
self._max_iter = max_iter
super().__init__(optimizer, last_epoch=last_iter)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import logging
from collections import defaultdict
from enum import Enum
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union
import torch
from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler
from detectron2.config import CfgNode
from .lr_scheduler import LRMultiplier, WarmupParamScheduler
_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]]
_GradientClipper = Callable[[_GradientClipperInput], None]
class GradientClipType(Enum):
VALUE = "value"
NORM = "norm"
def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper:
"""
Creates gradient clipping closure to clip by value or by norm,
according to the provided config.
"""
cfg = copy.deepcopy(cfg)
def clip_grad_norm(p: _GradientClipperInput):
torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE)
def clip_grad_value(p: _GradientClipperInput):
torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE)
_GRADIENT_CLIP_TYPE_TO_CLIPPER = {
GradientClipType.VALUE: clip_grad_value,
GradientClipType.NORM: clip_grad_norm,
}
return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)]
def _generate_optimizer_class_with_gradient_clipping(
optimizer: Type[torch.optim.Optimizer],
*,
per_param_clipper: Optional[_GradientClipper] = None,
global_clipper: Optional[_GradientClipper] = None,
) -> Type[torch.optim.Optimizer]:
"""
Dynamically creates a new type that inherits the type of a given instance
and overrides the `step` method to add gradient clipping
"""
assert (
per_param_clipper is None or global_clipper is None
), "Not allowed to use both per-parameter clipping and global clipping"
def optimizer_wgc_step(self, closure=None):
if per_param_clipper is not None:
for group in self.param_groups:
for p in group["params"]:
per_param_clipper(p)
else:
# global clipper for future use with detr
# (https://github.com/facebookresearch/detr/pull/287)
all_params = itertools.chain(*[g["params"] for g in self.param_groups])
global_clipper(all_params)
super(type(self), self).step(closure)
OptimizerWithGradientClip = type(
optimizer.__name__ + "WithGradientClip",
(optimizer,),
{"step": optimizer_wgc_step},
)
return OptimizerWithGradientClip
def maybe_add_gradient_clipping(
cfg: CfgNode, optimizer: Type[torch.optim.Optimizer]
) -> Type[torch.optim.Optimizer]:
"""
If gradient clipping is enabled through config options, wraps the existing
optimizer type to become a new dynamically created class OptimizerWithGradientClip
that inherits the given optimizer and overrides the `step` method to
include gradient clipping.
Args:
cfg: CfgNode, configuration options
optimizer: type. A subclass of torch.optim.Optimizer
Return:
type: either the input `optimizer` (if gradient clipping is disabled), or
a subclass of it with gradient clipping included in the `step` method.
"""
if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED:
return optimizer
if isinstance(optimizer, torch.optim.Optimizer):
optimizer_type = type(optimizer)
else:
assert issubclass(optimizer, torch.optim.Optimizer), optimizer
optimizer_type = optimizer
grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS)
OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping(
optimizer_type, per_param_clipper=grad_clipper
)
if isinstance(optimizer, torch.optim.Optimizer):
optimizer.__class__ = OptimizerWithGradientClip # a bit hacky, not recommended
return optimizer
else:
return OptimizerWithGradientClip
def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build an optimizer from config.
"""
params = get_default_optimizer_params(
model,
base_lr=cfg.SOLVER.BASE_LR,
weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR,
weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS,
)
return maybe_add_gradient_clipping(cfg, torch.optim.SGD)(
params,
lr=cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
)
def get_default_optimizer_params(
model: torch.nn.Module,
base_lr: Optional[float] = None,
weight_decay: Optional[float] = None,
weight_decay_norm: Optional[float] = None,
bias_lr_factor: Optional[float] = 1.0,
weight_decay_bias: Optional[float] = None,
overrides: Optional[Dict[str, Dict[str, float]]] = None,
) -> List[Dict[str, Any]]:
"""
Get default param list for optimizer, with support for a few types of
overrides. If no overrides needed, this is equivalent to `model.parameters()`.
Args:
base_lr: lr for every group by default. Can be omitted to use the one in optimizer.
weight_decay: weight decay for every group by default. Can be omitted to use the one
in optimizer.
weight_decay_norm: override weight decay for params in normalization layers
bias_lr_factor: multiplier of lr for bias parameters.
weight_decay_bias: override weight decay for bias parameters
overrides: if not `None`, provides values for optimizer hyperparameters
(LR, weight decay) for module parameters with a given name; e.g.
``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and
weight decay values for all module parameters named `embedding`.
For common detection models, ``weight_decay_norm`` is the only option
needed to be set. ``bias_lr_factor,weight_decay_bias`` are legacy settings
from Detectron1 that are not found useful.
Example:
::
torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0),
lr=0.01, weight_decay=1e-4, momentum=0.9)
"""
if overrides is None:
overrides = {}
defaults = {}
if base_lr is not None:
defaults["lr"] = base_lr
if weight_decay is not None:
defaults["weight_decay"] = weight_decay
bias_overrides = {}
if bias_lr_factor is not None and bias_lr_factor != 1.0:
# NOTE: unlike Detectron v1, we now by default make bias hyperparameters
# exactly the same as regular weights.
if base_lr is None:
raise ValueError("bias_lr_factor requires base_lr")
bias_overrides["lr"] = base_lr * bias_lr_factor
if weight_decay_bias is not None:
bias_overrides["weight_decay"] = weight_decay_bias
if len(bias_overrides):
if "bias" in overrides:
raise ValueError("Conflicting overrides for 'bias'")
overrides["bias"] = bias_overrides
norm_module_types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
# NaiveSyncBatchNorm inherits from BatchNorm2d
torch.nn.GroupNorm,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.LocalResponseNorm,
)
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for module in model.modules():
for module_param_name, value in module.named_parameters(recurse=False):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
hyperparams = copy.copy(defaults)
if isinstance(module, norm_module_types) and weight_decay_norm is not None:
hyperparams["weight_decay"] = weight_decay_norm
hyperparams.update(overrides.get(module_param_name, {}))
params.append({"params": [value], **hyperparams})
return reduce_param_groups(params)
def _expand_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Transform parameter groups into per-parameter structure.
# Later items in `params` can overwrite parameters set in previous items.
ret = defaultdict(dict)
for item in params:
assert "params" in item
cur_params = {x: y for x, y in item.items() if x != "params"}
for param in item["params"]:
ret[param].update({"params": [param], **cur_params})
return list(ret.values())
def reduce_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Reorganize the parameter groups and merge duplicated groups.
# The number of parameter groups needs to be as small as possible in order
# to efficiently use the PyTorch multi-tensor optimizer. Therefore instead
# of using a parameter_group per single parameter, we reorganize the
# parameter groups and merge duplicated groups. This approach speeds
# up multi-tensor optimizer significantly.
params = _expand_param_groups(params)
groups = defaultdict(list) # re-group all parameter groups by their hyperparams
for item in params:
cur_params = tuple((x, y) for x, y in item.items() if x != "params")
groups[cur_params].extend(item["params"])
ret = []
for param_keys, param_values in groups.items():
cur = {kv[0]: kv[1] for kv in param_keys}
cur["params"] = param_values
ret.append(cur)
return ret
def build_lr_scheduler(
cfg: CfgNode, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
<fim_suffix>
name = cfg.SOLVER.LR_SCHEDULER_NAME
if name == "WarmupMultiStepLR":
steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER]
if len(steps) != len(cfg.SOLVER.STEPS):
logger = logging.getLogger(__name__)
logger.warning(
"SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. "
"These values will be ignored."
)
sched = MultiStepParamScheduler(
values=[cfg.SOLVER.GAMMA ** k for k in range(len(steps) + 1)],
milestones=steps,
num_updates=cfg.SOLVER.MAX_ITER,
)
elif name == "WarmupCosineLR":
end_value = cfg.SOLVER.BASE_LR_END / cfg.SOLVER.BASE_LR
assert end_value >= 0.0 and end_value <= 1.0, end_value
sched = CosineParamScheduler(1, end_value)
else:
raise ValueError("Unknown LR scheduler: {}".format(name))
sched = WarmupParamScheduler(
sched,
cfg.SOLVER.WARMUP_FACTOR,
min(cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER, 1.0),
cfg.SOLVER.WARMUP_METHOD,
)
return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER)
<fim_middle>"""
Build a LR scheduler from config.
""" | """
Build a LR scheduler from config.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/hungarian_tracker.py
def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances:
untracked_idx = set(range(len(instances))).difference(set(matched_idx))
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
# UniRef/detectron2/tracking/hungarian_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/tracking/base_tracker.py
def __init__(self, **kwargs):
self._prev_instances = None # (D2)instances for previous frame
self._matched_idx = set() # indices in prev_instances found matching
self._matched_ID = set() # idendities in prev_instances found matching
self._untracked_prev_idx = set() # indices in prev_instances not found matching
self._id_count = 0 # used to assign new id
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height the video frame
video_width: width of the video frame
max_num_instances: maximum number of id allowed to be tracked
max_lost_frame_count: maximum number of frame an id can lost tracking
exceed this number, an id is considered as lost
forever
min_box_rel_dim: a percentage, smaller than this dimension, a bbox is
removed from tracking
min_instance_period: an instance will be shown after this number of period
since its first showing up in the video
track_iou_threshold: iou threshold, below this number a bbox pair is removed
from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predcitions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of IoU for all pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
"""
Before each uodate call, reset fields first
"""
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
<fim_suffix>
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain condition, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging current instances and instances from previous
frame decided to keep tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
""" | """
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/structures/masks.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/projects/UniRef/uniref/util/box_ops.py
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
Returns a [N, 4] tensors, with the boxes in xyxy format
"""
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float, device=masks.device)
x = torch.arange(0, w, dtype=torch.float, device=masks.device)
y, x = torch.meshgrid(y, x)
x_mask = (masks * x.unsqueeze(0))
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = (masks * y.unsqueeze(0))
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
return torch.stack([x_min, y_min, x_max, y_max], 1)
# UniRef/detectron2/structures/boxes.py
def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M, compute the IoU
(intersection over union) between **all** N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [M]
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
iou = torch.where(
inter > 0,
inter / (area1[:, None] + area2 - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
return iou
# UniRef/detectron2/data/dataset_mapper.py
def __init__(
self,
is_train: bool,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
use_instance_mask: bool = False,
use_keypoint: bool = False,
instance_mask_format: str = "polygon",
keypoint_hflip_indices: Optional[np.ndarray] = None,
precomputed_proposal_topk: Optional[int] = None,
recompute_boxes: bool = False,
):
"""
NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
use_instance_mask: whether to process instance segmentation annotations, if available
use_keypoint: whether to process keypoint annotations if available
instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
masks into this format.
keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
precomputed_proposal_topk: if given, will load pre-computed
proposals from dataset_dict and keep the top k proposals for each image.
recompute_boxes: whether to overwrite bounding box annotations
by computing tight bounding boxes from instance mask annotations.
"""
if recompute_boxes:
assert use_instance_mask, "recompute_boxes requires instance masks"
# fmt: off
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.use_instance_mask = use_instance_mask
self.instance_mask_format = instance_mask_format
self.use_keypoint = use_keypoint
self.keypoint_hflip_indices = keypoint_hflip_indices
self.proposal_topk = precomputed_proposal_topk
self.recompute_boxes = recompute_boxes
# fmt: on
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(np.bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(np.bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However we observe no difference in accuracy,
but BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
<fim_suffix>
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list correspond to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around polygon masks.
"""
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy
return Boxes(boxes)
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor:
a BoolTensor which represents whether each mask is empty (False) or not (True).
"""
keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
return torch.from_numpy(np.asarray(keep, dtype=np.bool))
def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
"""
Support indexing over the instances and return a `PolygonMasks` object.
`item` can be:
1. An integer. It will return an object with only one instance.
2. A slice. It will return an object with the selected instances.
3. A list[int]. It will return an object with the selected instances,
correpsonding to the indices in the list.
4. A vector mask of type BoolTensor, whose length is num_instances.
It will return an object with the instances whose mask is nonzero.
"""
if isinstance(item, int):
selected_polygons = [self.polygons[item]]
elif isinstance(item, slice):
selected_polygons = self.polygons[item]
elif isinstance(item, list):
selected_polygons = [self.polygons[i] for i in item]
elif isinstance(item, torch.Tensor):
# Polygons is a list, so we have to move the indices back to CPU.
if item.dtype == torch.bool:
assert item.dim() == 1, item.shape
item = item.nonzero().squeeze(1).cpu().numpy().tolist()
elif item.dtype in [torch.int32, torch.int64]:
item = item.cpu().numpy().tolist()
else:
raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
selected_polygons = [self.polygons[i] for i in item]
return PolygonMasks(selected_polygons)
def __iter__(self) -> Iterator[List[np.ndarray]]:
"""
Yields:
list[ndarray]: the polygons for one instance.
Each Tensor is a float64 vector representing a polygon.
"""
return iter(self.polygons)
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.polygons))
return s
def __len__(self) -> int:
return len(self.polygons)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each mask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor: A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = boxes.device
# Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
# (several small tensors for representing a single instance mask)
boxes = boxes.to(torch.device("cpu"))
results = [
rasterize_polygons_within_box(poly, box.numpy(), mask_size)
for poly, box in zip(self.polygons, boxes)
]
"""
poly: list[list[float]], the polygons for one instance
box: a tensor of shape (4,)
"""
if len(results) == 0:
return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
return torch.stack(results, dim=0).to(device=device)
def area(self):
"""
Computes area of the mask.
Only works with Polygons, using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Returns:
Tensor: a vector, area for each instance
"""
area = []
for polygons_per_instance in self.polygons:
area_per_instance = 0
for p in polygons_per_instance:
area_per_instance += polygon_area(p[0::2], p[1::2])
area.append(area_per_instance)
return torch.tensor(area)
@staticmethod
def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
"""
Concatenates a list of PolygonMasks into a single PolygonMasks
Arguments:
polymasks_list (list[PolygonMasks])
Returns:
PolygonMasks: the concatenated PolygonMasks
"""
assert isinstance(polymasks_list, (list, tuple))
assert len(polymasks_list) > 0
assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
cat_polymasks = type(polymasks_list[0])(
list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
)
return cat_polymasks
class ROIMasks:
"""
Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
full-image bitmask can be obtained by "pasting" the mask on the region defined
by the corresponding ROI box.
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor: (N, M, M) mask tensor that defines the mask within each ROI.
"""
if tensor.dim() != 3:
raise ValueError("ROIMasks must take a masks of 3 dimension.")
self.tensor = tensor
def to(self, device: torch.device) -> "ROIMasks":
return ROIMasks(self.tensor.to(device))
@property
def device(self) -> device:
return self.tensor.device
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, item) -> "ROIMasks":
"""
Returns:
ROIMasks: Create a new :class:`ROIMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[2:10]`: return a slice of masks.
2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
t = self.tensor[item]
if t.dim() != 3:
raise ValueError(
f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
)
return ROIMasks(t)
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
@torch.jit.unused
def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
"""
Args: see documentation of :func:`paste_masks_in_image`.
"""
from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape
if torch.jit.is_tracing():
if isinstance(height, torch.Tensor):
paste_func = _paste_masks_tensor_shape
else:
paste_func = paste_masks_in_image
else:
paste_func = retry_if_cuda_oom(paste_masks_in_image)
bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold)
return BitMasks(bitmasks)
<fim_middle>"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, it's bounding box will be all zero.
""" | """
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, it's bounding box will be all zero.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/base_tracker.py
def __init__(self, **kwargs):
self._prev_instances = None # (D2)instances for previous frame
self._matched_idx = set() # indices in prev_instances found matching
self._matched_ID = set() # idendities in prev_instances found matching
self._untracked_prev_idx = set() # indices in prev_instances not found matching
self._id_count = 0 # used to assign new id
# UniRef/detectron2/tracking/hungarian_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/evaluation/sem_seg_evaluation.py
def reset(self):
self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64)
self._predictions = []
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height the video frame
video_width: width of the video frame
max_num_instances: maximum number of id allowed to be tracked
max_lost_frame_count: maximum number of frame an id can lost tracking
exceed this number, an id is considered as lost
forever
min_box_rel_dim: a percentage, smaller than this dimension, a bbox is
removed from tracking
min_instance_period: an instance will be shown after this number of period
since its first showing up in the video
track_iou_threshold: iou threshold, below this number a bbox pair is removed
from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predcitions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of IoU for all pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
<fim_suffix>
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain condition, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging current instances and instances from previous
frame decided to keep tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>"""
Before each uodate call, reset fields first
""" | """
Before each uodate call, reset fields first
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
🤗 Hugging Face | 🤖 ModelScope | 📑 GitHub | 📄 arXiv
Codev-Bench: How Do LLMs Understand Developer-Centric Code Completion?
Introduction
Codev-Bench (Code Development Benchmark) is a fine-grained, real-world, repository-level, and developer-centric evaluation framework. It assesses whether a code completion tool can accurately capture a developer's immediate intent and suggest appropriate code snippets across diverse, fine-grained contexts.
In daily IDE-based development, a user's real-time autocompletion needs are diverse. They include not only generating a whole function from a comment, but also sub-scenes such as contextual completion of logical blocks, completion of function parameter lists, and completion of ordinary statements. Previous code generation or completion benchmarks, such as HumanEval, MBPP, ClassEval, LiveCodeBench, and EvoCodeBench, focus only on generating an entire function from a comment.
To better align with real development scenarios, we propose Codev-Bench. It not only reproduces the diverse sub-scenes that users encounter during development, but also constructs a unit-test-based evaluation method to assess the quality of the code generated by various LLMs more accurately.
Methodology
In detail, we first extract unit test classes and functions from real GitHub repositories, then install the environment dependencies and execute the unit tests with the assistance of GPT-4. At the same time, we use the pytest trace tool to extract the execution traces of the unit tests and identify the target functions related to each unit test. Finally, tree-sitter is used to parse the AST (Abstract Syntax Tree) of the target functions, so that all the sub-functions, comments, logical blocks, statements, etc. can be recognized.
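To illustrate the block-extraction step, the sketch below uses Python's built-in ast module as a lightweight stand-in for tree-sitter (which the pipeline actually uses); it simply enumerates the nested statements of a target function together with their line spans. The file name and target function name are hypothetical.

import ast

def enumerate_blocks(source: str, func_name: str):
    """Yield the nested blocks/statements of `func_name` with their line spans."""
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, ast.FunctionDef) and node.name == func_name:
            for child in ast.walk(node):
                if isinstance(child, (ast.If, ast.For, ast.While, ast.Try, ast.With, ast.Return)):
                    yield type(child).__name__, child.lineno, child.end_lineno

if __name__ == "__main__":
    code = open("example_target.py").read()                    # hypothetical target file
    for kind, start, end in enumerate_blocks(code, "update"):  # hypothetical function name
        print(f"{kind}: lines {start}-{end}")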
We split the completion sub-scenes or capabilities that users may encounter while developing in an IDE into the following parts:
✅ Scenario 1 - Full block completion: In this scenario, the model is tasked with completing a full code block (e.g., function, if, for, try, or statement) based on a complete, unbroken surrounding context. To pass, the model must accurately complete the block and stop at the correct point, ensuring it passes the unit test successfully.
✅ Scenario 2 - Incomplete suffix completion: Compared to Scenario 1, this scenario focuses on cases where the suffix content following the current cursor is incomplete. It covers two sub-cases: one where all the suffix content after the cursor in the entire file is empty, and another where only the content within the current function body after the cursor is missing.
✅ Scenario 3 - Inner block completion: In this scenario, the model is required to complete a portion of a code block based on a complete, unbroken surrounding context. In addition, 20% of the samples in this scenario have an empty ground truth, which evaluates the model's ability to recognize when the current block is already complete and no further completion is needed.
✅ Scenario 4 - RAG-based completion: In this scenario, the model builds upon the full block completion task by incorporating a Retrieval-Augmented Generation (RAG) module. The repository is partitioned into chunks, with only functions considered as candidates. The function containing the current code is used as the query, and the query's embedding is compared with the embeddings of the candidate functions. The top 3 most similar candidates are then inserted back into the prompt as hints to guide code generation (a minimal retrieval sketch follows this list).
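The retrieval step of Scenario 4 can be pictured with the minimal sketch below. It assumes an embedding model is available behind an embed callable and that candidate functions have already been extracted as strings; the helper names and the embedding model are illustrative, not part of the released pipeline.

import numpy as np

def cosine(a: np.ndarray, b: np.ndarray) -> float:
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8))

def top_k_candidates(query_fn: str, candidate_fns: list[str], embed, k: int = 3) -> list[str]:
    """Rank candidate functions by embedding similarity to the query function."""
    q = embed(query_fn)
    scored = [(cosine(q, embed(fn)), fn) for fn in candidate_fns]
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return [fn for _, fn in scored[:k]]

# The retrieved functions are then inserted back into the completion prompt as hints,
# e.g. as commented context ahead of the prefix.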
How To Use
Data
Researchers and developers can download the source GitHub repositories Source_Code.tar.gz and their copy version Source_Code_Copy.tar.gz. These repositories are obtained from EvoCodeBench and were created between Dec 2023 and Feb 2024. In the future, we will continuously crawl and analyze new repositories as source repositories for evaluation.
All the files can be downloaded as follows:
cd CodevBench
# download the code of source repositories
wget "https://huggingface.co/datasets/TongyiLingma/CodevBench/resolve/main/Source_Code.tar.gz?download=true" -O Source_Code.tar.gz
tar -zxvf Source_Code.tar.gz
# download the copy version of source repositories
wget "https://huggingface.co/datasets/TongyiLingma/CodevBench/resolve/main/Source_Code_Copy.tar.gz?download=true" -O Source_Code_Copy.tar.gz
tar -zxvf Source_Code_Copy.tar.gz
# download repositories' metadata (e.g. unit test paths, functions, target blocks, etc.)
wget "https://huggingface.co/datasets/TongyiLingma/CodevBench/resolve/main/metadatas.tar.gz?download=true" -O metadatas.tar.gz
tar -zxvf metadatas.tar.gz
# download the prompt of each completion question
wget "https://huggingface.co/datasets/TongyiLingma/CodevBench/resolve/main/prompts.tar.gz?download=true" -O prompts.tar.gz
tar -zxvf prompts.tar.gz
# download the predicted responses of each LLM and Code LLM
wget "https://huggingface.co/datasets/TongyiLingma/CodevBench/resolve/main/predicts.tar.gz?download=true" -O predicts.tar.gz
tar -zxvf predicts.tar.gz
Installation
We recommend that researchers and developers use conda or venv to create an isolated virtual environment (the commands below use venv).
cd CodevBench
python3.10 -m venv myenv && source myenv/bin/activate
pip install pytest pandas tqdm fuzzywuzzy
Then, researchers and developers can build the environment by running the following command.
bash create_env.sh
It will take a few hours to build the execution environment.
Validation
To validate whether the unit tests of each repository run successfully, you can run the following command:
myenv/bin/python src/prepare.py --method retest_block_unit_test --mode prefix_suffix_full_complete_current_block_no_evidence
If almost all the unit tests run successfully, researchers and developers can proceed to the subsequent steps of calling models for prediction and evaluation.
Prompts
We split the completion sub-scenes or capabilities as follows:
Scenario 1: ./prompts/prefix_suffix_full_complete_current_block_no_evidence.jsonl.
Scenario 2: ./prompts/complete_current_header_inner_block_completion.jsonl and ./prompts/complete_current_header_empty_completion.jsonl.
Scenario 3: ./prompts/prefix_full_suffix_func_empty_complete_current_block_no_evidence.jsonl and ./prompts/prefix_full_suffix_empty_complete_current_block_no_evidence.jsonl.
Scenario 4: ./prompts/prefix_suffix_full_complete_current_block_with_repo_rag_oracle.
The structure of the prompts is as follows:
{
"func_name": "function file path and line position",
"item_dids": [
"unit test ids"
],
"unit_test_ids": [
"unit test ids"
],
"block_key": "target code block file path and line position",
"block_type": "AST type of block",
"prompt": "<filename>xxx<fim_prefix>xxx<fim_suffix>xxx<fim_middle>xxx",
"prefix": "prefix context of target code block",
"suffix": "suffix context of target code block",
"middle": "ground truth of target code block",
"test_prefix": "prefix context of to construct the unit test",
"test_suffix": "suffix context of to construct the unit test",
"test_middle": "ground truth of target code block to construct the unit test",
}
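To make the record layout concrete, here is a minimal sketch of loading one of the prompt files and inspecting a record; the file path follows the Scenario 1 entry above, and only fields documented in the structure shown are accessed.

import json

path = "./prompts/prefix_suffix_full_complete_current_block_no_evidence.jsonl"
with open(path, encoding="utf-8") as f:
    records = [json.loads(line) for line in f]

sample = records[0]
print(sample["block_type"], sample["block_key"])
print("--- prefix tail ---")
print(sample["prefix"][-300:])   # last part of the context before the cursor
print("--- expected middle ---")
print(sample["middle"])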
Predictions
We provide the prefix context and suffix context in the prompt, so users can call different models (general LLMs or code LLMs) to predict the completion of the target code block.
For general LLMs, we provide a natural-language prompt template in ./src/templates/llm_template.py; users can use this template to construct the final prompt and call the model.
For code LLMs, users should construct the prompt according to the Fill-In-the-Middle (FIM) template of the corresponding code LLM and call the model. We also provide some calling examples in /mnt/coai_nas/qianhu/github/completion_benchmark/src/request_model.py.
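As an illustration, a FIM-style prompt for a code LLM can be assembled from the prefix and suffix fields roughly as follows. The sentinel tokens below are the generic ones that also appear in the dataset's prompt field; the exact tokens and their order differ per model, so this is a sketch rather than the released request code.

def build_fim_prompt(record: dict,
                     pre="<fim_prefix>", suf="<fim_suffix>", mid="<fim_middle>") -> str:
    """Assemble a generic prefix-suffix-middle prompt; sentinel tokens are model-specific."""
    return f"{pre}{record['prefix']}{suf}{record['suffix']}{mid}"

# prompt = build_fim_prompt(sample)    # `sample` as loaded in the previous sketch
# completion = model.generate(prompt)  # hypothetical model call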
The predicted responses are as follows:
{
"func_name": "function file path and line position",
"item_dids": [
"unit test ids"
],
"unit_test_ids": [
"unit test ids"
],
"block_key": "target code block file path and line position",
"block_type": "AST type of block",
"prompt": "<filename>xxx<fim_prefix>xxx<fim_suffix>xxx<fim_middle>xxx",
"prefix": "prefix context of target code block",
"suffix": "suffix context of target code block",
"middle": "ground truth of target code block",
"test_prefix": "prefix context of to construct the unit test",
"test_suffix": "suffix context of to construct the unit test",
"test_middle": "ground truth of target code block to construct the unit test",
"response_original_text": "original response of the model",
"response": "the parsed final target code for model to complete"
}
We provide some examples in ./predicts/prefix_suffix_full_complete_current_block_no_evidence/predictions/.
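A minimal sketch of producing such a prediction file is shown below: each prompt record is copied, the two response fields documented above are filled in, and the result is written as JSON lines alongside the provided examples. The call_model helper and the output file name are placeholders.

import json

def write_predictions(records, call_model, out_path):
    """Attach model outputs to each prompt record and dump them as JSON lines."""
    with open(out_path, "w", encoding="utf-8") as f:
        for rec in records:
            raw = call_model(rec)          # placeholder: your LLM call
            rec = dict(rec)
            rec["response_original_text"] = raw
            rec["response"] = raw.strip()  # parse/trim down to the completed block
            f.write(json.dumps(rec, ensure_ascii=False) + "\n")

# write_predictions(records, call_model=my_model_fn,
#                   out_path="./predicts/prefix_suffix_full_complete_current_block_no_evidence/predictions/my_model.jsonl")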
Evaluation
The final step is to fill the predicted code into the cursor position and run the corresponding unit tests.
After calling the model and obtaining the predicted responses, we can run the following command to run the unit tests:
myenv/bin/python src/evaluate.py --method evaluate_prediction --model codegemma_7b --mode prefix_suffix_full_complete_current_block_no_evidence --check-unittest
Thus, the result file ./predicts/prefix_suffix_full_complete_current_block_no_evidence/results/codegemma_7b.jsonl.x will be generated. Then, users can use the following command to summarize the results:
# for scenario 1
myenv/bin/python src/evaluate.py --method print_all_scores --model codegemma_7b --mode prefix_suffix_full_complete_current_block_no_evidence
# for scenario 2
myenv/bin/python src/evaluate.py --method print_all_scores --model codegemma_7b --mode complete_current_header_inner_block_and_empty_completion
# for scenario 3
myenv/bin/python src/evaluate.py --method print_all_scores --model codegemma_7b --mode prefix_suffix_empty_current_block
# for scenario 4
myenv/bin/python src/evaluate.py --method print_all_scores --model codegemma_7b --mode prefix_suffix_full_complete_current_block_with_repo_rag_oracle
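For convenience, both evaluation steps can be scripted across several models using the documented CLI; the sketch below simply shells out to the commands shown above, and the model list is illustrative.

import subprocess

models = ["codegemma_7b"]   # illustrative; add your own model names
mode = "prefix_suffix_full_complete_current_block_no_evidence"

for model in models:
    subprocess.run(["myenv/bin/python", "src/evaluate.py",
                    "--method", "evaluate_prediction",
                    "--model", model, "--mode", mode, "--check-unittest"], check=True)
    subprocess.run(["myenv/bin/python", "src/evaluate.py",
                    "--method", "print_all_scores",
                    "--model", model, "--mode", mode], check=True)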
Experimental Results
Overall Results
The Results of Scenario 1
The Results of Scenario 2
The Results of Scenario 3
The Results of Scenario 4
Citation
If you use this dataset in your research, please cite the following paper:
@article{pan2024codev,
title={Codev-Bench: How Do LLMs Understand Developer-Centric Code Completion?},
author={Pan, Zhenyu and Cao, Rongyu and Cao, Yongchang and Ma, Yingwei and Li, Binhua and Huang, Fei and Liu, Han and Li, Yongbin},
journal={arXiv preprint arXiv:2410.01353},
year={2024}
}