| inputs (string, 312–52k chars) | targets (string, 1–3.1k chars, nullable) | block_type (11 classes) | scenario (7 classes) |
| --- | --- | --- | --- |
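Each record below pairs an `inputs` string — a fill-in-the-middle prompt built from a `<filename>` tag, retrieved repository snippets inside the `<fim_prefix>` docstring, the file's own code with a `<fim_suffix>` hole, and a `<fim_middle>` sentinel — with a `targets` completion, a `block_type` label, and a `scenario` name. As a rough illustration only (the record layout and field names here are assumptions read off the column header above, not an official loader), one way to pull the sentinel-delimited pieces out of a record looks like this:

```python
# Hypothetical helper for inspecting one record of this dump; the sentinel
# names are taken from the rows below, the toy record is made up.
def split_fim_prompt(inputs: str) -> dict:
    """Split a FIM prompt into filename, prefix, and suffix segments."""
    header, rest = inputs.split("<fim_prefix>", 1)
    prefix, rest = rest.split("<fim_suffix>", 1)
    suffix = rest.split("<fim_middle>", 1)[0]
    return {
        "filename": header.removeprefix("<filename>"),
        "prefix": prefix,
        "suffix": suffix,
    }

record = {
    "inputs": "<filename>demo.py<fim_prefix>x = 1\n<fim_suffix>\nprint(x + y)\n<fim_middle>",
    "targets": "y = 2",
    "block_type": "STATEMENT",
    "scenario": "prefix_suffix_full_complete_current_block_with_repo_rag_oracle",
}
parts = split_fim_prompt(record["inputs"])
print(parts["filename"], "->", record["targets"])  # demo.py -> y = 2
```

The same split applies to every row in this dump; only the prefix and suffix contents change from record to record.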
<filename>UHGEval/uhgeval/dataset/truthfulqa.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/dataset/xinhua.py
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
if os.path.isfile(path):
with open(path, encoding='utf-8') as f:
self.data = json.load(f)
if shuffle:
random.seed(seed)
random.shuffle(self.data)
# UHGEval/uhgeval/evaluator/base.py
def read_output(self) -> dict:
with open(self.output_path, encoding='utf-8') as f:
return json.load(f)
# UHGEval/uhgeval/evaluator/base.py
def save_output(self, output: dict) -> None:
"""Save evaluation results."""
with open(self.output_path, 'w', encoding='utf-8') as f:
json.dump(output, f, ensure_ascii=False, indent=4)
"""
# @Author : YeZhaohui Wang
# @Email : wyzh0912@126.com
import csv
import json
import os
import random
from uhgeval.dataset.base import BaseDataset
class TruthfunQAGeneration(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
if os.path.isfile(path):
with open(path, 'r', encoding='utf-8-sig') as file:
csv_reader = csv.DictReader(file)
id = 1
for row in csv_reader:
row['id'] = id
id += 1
self.data.append(row)
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
return self.data[:]
class TruthfunQAMC1(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
<fim_suffix>
id = 1
if os.path.isfile(path):
with open(path, encoding='utf-8') as f:
self.data = json.load(f)
for row in self.data:
row['id'] = id
id += 1
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
return self.data[:]
class TruthfunQAMC2(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
id = 1
if os.path.isfile(path):
with open(path, encoding='utf-8') as f:
self.data = json.load(f)
for row in self.data:
row['id'] = id
id += 1
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
return self.data[:]
<fim_middle>self.data = [] | self.data = [] | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
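In the row above, the `<fim_suffix>` hole sits where `self.data = []` belongs in `TruthfunQAMC1.__init__`; the text after `<fim_middle>` is the reference completion, and the `targets` column repeats it. A model completion can then be compared against that target. The whitespace-normalized exact match below is only a hedged sketch of such a check, not the benchmark's official scoring:

```python
# Hedged sketch of an exact-match check between a model completion and the
# row's target; the whitespace normalization is an assumption made for
# illustration, not the dataset's prescribed metric.
import re

def exact_match(completion: str, target: str) -> bool:
    normalize = lambda s: re.sub(r"\s+", " ", s.strip())
    return normalize(completion) == normalize(target)

print(exact_match("self.data  = []", "self.data = []"))  # True
print(exact_match("self.data = {}", "self.data = []"))   # False
```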
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
<fim_suffix>
<fim_middle>return accuracy, precision, recall, f1 | return accuracy, precision, recall, f1 | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
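The `classifications` helper recurs throughout these rows, and this row's target is its return statement. A worked toy call makes the TP/FP/FN arithmetic concrete; the function is restated below (condensed but behaviourally identical) so the snippet runs standalone, and the label vectors are invented purely for illustration:

```python
# Worked toy call of the `classifications` helper quoted in the row above.
def classifications(predictions, references):
    tp = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
    fp = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
    fn = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
    accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if predictions else 0
    return accuracy, precision, recall, f1

# references: 1 = hallucinated keyword, 0 = not hallucinated, following the
# convention stated in discriminative.py's scoring docstring above
refs = [1, 1, 0, 0]
preds = [1, 0, 1, 0]
print(classifications(preds, refs))  # (0.5, 0.5, 0.5, 0.5)
```

With one true positive, one false positive, and one false negative, precision, recall, F1, and accuracy all come out to 0.5.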
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
<fim_suffix>
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>f1 = 2 * (precision * recall) / (precision + recall) | f1 = 2 * (precision * recall) / (precision + recall) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
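Separately from this row's F1 target, the optional brevity penalty in `bleu4_score` (quoted in the prefix above) is easy to sanity-check numerically; the token counts below are made up for illustration:

```python
# Worked illustration of the brevity-penalty branch in bleu4_score: a
# continuation half the reference length is scaled by exp(1 - 2) ≈ 0.368.
import math

reference_length, continuation_length = 20, 10  # hypothetical token counts
if continuation_length > reference_length:
    brevity_penalty = 1
else:
    brevity_penalty = math.exp(1 - (reference_length / continuation_length))
print(round(brevity_penalty, 3))  # 0.368
```

A continuation longer than the reference keeps the score unchanged (penalty 1), matching the `continuation_length > reference_length` branch in the quoted code.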
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
# UHGEval/uhgeval/metric/tmp_common.py
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
# UHGEval/uhgeval/llm/base.py
def safe_request(self, query: str) -> str:
"""Safely make a request to the language model, handling exceptions."""
try:
response = self.request(query)
except Exception as e:
logger.warning(repr(e))
response = ''
return response
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
<fim_suffix>
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>logger.warning(repr(e)) | logger.warning(repr(e)) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
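This row's completion target, `logger.warning(repr(e))`, is the only statement on the decorator's exception path, so `wrapper` falls off the end and a decorated metric returns `None` rather than raising. The stand-alone sketch below restates that behaviour with `print` in place of loguru so it runs without extra dependencies; it is an illustration of the quoted pattern, not repository code:

```python
# Demonstration that a catch_all_exceptions-style wrapper yields None when
# the wrapped function raises.
def catch_all_exceptions(func):
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            print("warning:", repr(e))  # stand-in for logger.warning(repr(e))
    return wrapper

@catch_all_exceptions
def fragile_metric(text: str) -> float:
    return 1 / len(text)

print(fragile_metric("abc"))  # 0.3333333333333333
print(fragile_metric(""))     # logs ZeroDivisionError, then prints None
```

Callers therefore need to treat a `None` metric value as "computation failed" rather than as a score.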
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
<fim_suffix>
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) | false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
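One side observation on the `kw_precision` helper quoted in these rows: in `return precision, appeared_kws, kws if with_kw_list else precision`, the conditional expression binds only to the final element, so the `with_kw_list=False` path still yields a 3-tuple rather than a bare float. The snippet below is a minimal reproduction of that precedence, using made-up values:

```python
# Minimal reproduction of the tuple/conditional precedence seen in
# kw_precision's return statements; values are invented for illustration.
with_kw_list = False
precision, appeared_kws, kws = 0.5, ["a"], ["a", "b"]
result = precision, appeared_kws, kws if with_kw_list else precision
print(result)  # (0.5, ['a'], 0.5) -- still a tuple, not a bare float
```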
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
<fim_suffix>
return accuracy, precision, recall, f1
<fim_middle>accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 | accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0 | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
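The `compute_bleu` snippet retrieved into this row combines the n-gram precisions through a geometric mean before applying the brevity penalty. The numbers below are hypothetical precisions chosen only to illustrate that step; note that any zero precision (without smoothing) drives the geometric mean, and hence BLEU, to zero:

```python
# Illustration of the geometric-mean step in compute_bleu with made-up
# n-gram precisions p_1..p_4: geo_mean = exp(mean(log p_i)).
import math

precisions = [0.8, 0.6, 0.4, 0.3]  # hypothetical values
max_order = len(precisions)
if min(precisions) > 0:
    p_log_sum = sum((1.0 / max_order) * math.log(p) for p in precisions)
    geo_mean = math.exp(p_log_sum)
else:
    geo_mean = 0
print(round(geo_mean, 3))  # ≈ 0.49
```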
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
<fim_suffix>
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) | true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
<fim_suffix>
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 | precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0 | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
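A descriptive reading of the record layout in this dump (an informal guess, not an official schema): each record concatenates a <filename> tag, a <fim_prefix> whose leading docstring holds retrieved repository snippets followed by the file content up to the masked span, a <fim_suffix> with the rest of the file, and a <fim_middle> carrying the expected completion; the trailing pipe-separated fields repeat the target and give its block_type and scenario. A hedged Python sketch of how such a record could be assembled into an infilling prompt (the dict keys and placeholder strings are illustrative, not a documented schema):

# Hedged sketch: turning one record of this dump into a fill-in-the-middle prompt.
# The keys and sentinel strings mirror what is visible above; they are guesses.
record = {
    "filename": "UHGEval/uhgeval/metric/common.py",
    "fim_prefix": '"""retrieved snippets ..."""\n# file content before the masked span',
    "fim_suffix": "# file content after the masked span",
    "target": ("precision = true_positive / (true_positive + false_positive) "
               "if (true_positive + false_positive) > 0 else 0"),
    "block_type": "STATEMENT",
    "scenario": "prefix_suffix_full_complete_current_block_with_repo_rag_oracle",
}

prompt = (
    f"<filename>{record['filename']}"
    f"<fim_prefix>{record['fim_prefix']}"
    f"<fim_suffix>{record['fim_suffix']}"
    f"<fim_middle>"
)
# A completion model would be expected to emit record["target"] after <fim_middle>.
print(prompt)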
<filename>UHGEval/uhgeval/dataset/truthfulqa.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/dataset/xinhua.py
def load(self) -> list[dict]:
return self.data[:]
# UHGEval/uhgeval/dataset/base.py
def load(self) -> list[dict]:
...
# UHGEval/uhgeval/evaluator/base.py
def read_output(self) -> dict:
with open(self.output_path, encoding='utf-8') as f:
return json.load(f)
"""
# @Author : YeZhaohui Wang
# @Email : wyzh0912@126.com
import csv
import json
import os
import random
from uhgeval.dataset.base import BaseDataset
class TruthfunQAGeneration(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
if os.path.isfile(path):
with open(path, 'r', encoding='utf-8-sig') as file:
csv_reader = csv.DictReader(file)
id = 1
for row in csv_reader:
row['id'] = id
id += 1
self.data.append(row)
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
return self.data[:]
class TruthfunQAMC1(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
id = 1
if os.path.isfile(path):
with open(path, encoding='utf-8') as f:
self.data = json.load(f)
for row in self.data:
row['id'] = id
id += 1
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
<fim_suffix>
class TruthfunQAMC2(BaseDataset):
def __init__(self, path: str, shuffle: bool = False, seed: int = 22):
self.data = []
id = 1
if os.path.isfile(path):
with open(path, encoding='utf-8') as f:
self.data = json.load(f)
for row in self.data:
row['id'] = id
id += 1
if shuffle:
random.seed(seed)
random.shuffle(self.data)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, key: int | slice) -> dict | list[dict]:
return self.data[key]
def load(self) -> list[dict]:
return self.data[:]
<fim_middle>return self.data[:] | return self.data[:] | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
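The completed statement, return self.data[:], hands back a shallow copy of the row list. A minimal sketch of why that matters (ToyDataset below is a hypothetical stand-in, not the repository's BaseDataset): callers may reorder or filter the returned list without disturbing the dataset's internal order, although the row dicts themselves remain shared objects.

# Minimal illustration with a hypothetical stand-in class.
class ToyDataset:
    def __init__(self, rows):
        self.data = rows

    def load(self):
        return self.data[:]          # shallow copy of the list

ds = ToyDataset([{"id": 1}, {"id": 2}, {"id": 3}])
rows = ds.load()
rows.reverse()                       # only the returned copy is reversed
print([r["id"] for r in ds.data])    # [1, 2, 3]: internal order unchanged
rows[0]["id"] = 99                   # but the dicts are shared objects
print(ds.data[2]["id"])              # 99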
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
# UHGEval/uhgeval/metric/tmp_common.py
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
# UHGEval/uhgeval/llm/base.py
def safe_request(self, query: str) -> str:
"""Safely make a request to the language model, handling exceptions."""
try:
response = self.request(query)
except Exception as e:
logger.warning(repr(e))
response = ''
return response
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
<fim_suffix>
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>return result | return result | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
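The masked span here is the return inside catch_all_exceptions: on success the wrapper returns the wrapped function's result, and when anything raises it logs the exception and falls through, so the call implicitly returns None. A small behavioural sketch, with the standard logging module standing in for loguru and functools.wraps added for clarity:

# Behavioural sketch of the decorator (standard logging stands in for loguru).
import functools
import logging

logger = logging.getLogger(__name__)

def catch_all_exceptions(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.warning(repr(e))   # falls through: the wrapper returns None
    return wrapper

@catch_all_exceptions
def risky_div(a, b):
    return a / b

print(risky_div(1, 2))   # 0.5
print(risky_div(1, 0))   # None (the ZeroDivisionError was logged, not raised)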
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
<fim_suffix>
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall) | if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
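The masked block is the harmonic-mean F1 with a guard for precision + recall == 0, the case where the plain formula would divide by zero. A toy check of the four guarded metrics on made-up predictions (illustrative only, not part of any record):

# Toy sanity check of the guarded metric formulas on invented values.
predictions = [1, 0, 1, 1, 0, 0]
references  = [1, 0, 0, 1, 1, 0]

tp = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 1)  # 2
fp = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 1)  # 1
fn = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 0)  # 1

precision = tp / (tp + fp) if (tp + fp) > 0 else 0   # 2/3
recall    = tp / (tp + fn) if (tp + fn) > 0 else 0   # 2/3
f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0  # 2/3
accuracy = sum(1 for r, p in zip(references, predictions) if r == p) / len(predictions)  # 4/6
print(accuracy, precision, recall, f1)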
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
# UHGEval/uhgeval/metric/tmp_common.py
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
# UHGEval/uhgeval/llm/base.py
def safe_request(self, query: str) -> str:
"""Safely make a request to the language model, handling exceptions."""
try:
response = self.request(query)
except Exception as e:
logger.warning(repr(e))
response = ''
return response
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
<fim_suffix>
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>try:
result = func(*args, **kwargs)
return result | try:
result = func(*args, **kwargs)
return result | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
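Several records in this stretch embed bleu4_score, whose with_penalty branch multiplies the BLEU value by a brevity penalty of exp(1 - reference_length / continuation_length) whenever the continuation is not longer than the reference. A toy check of that factor (the token counts are made up):

# Toy check of the brevity-penalty factor used in bleu4_score above.
import math

def brevity_penalty(reference_length, continuation_length):
    if continuation_length > reference_length:
        return 1.0
    return math.exp(1 - (reference_length / continuation_length))

print(brevity_penalty(10, 12))   # 1.0: candidate longer than reference, no penalty
print(brevity_penalty(10, 10))   # 1.0: equal lengths, exp(0)
print(brevity_penalty(10, 5))    # ~0.368: short candidate penalised, exp(-1)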
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
# UHGEval/uhgeval/metric/tmp_common.py
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
# UHGEval/uhgeval/llm/base.py
def safe_request(self, query: str) -> str:
"""Safely make a request to the language model, handling exceptions."""
try:
response = self.request(query)
except Exception as e:
logger.warning(repr(e))
response = ''
return response
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
<fim_suffix>
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>except Exception as e:
logger.warning(repr(e)) | except Exception as e:
logger.warning(repr(e)) | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UHGEval/uhgeval/metric/common.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UHGEval/uhgeval/metric/tmp_common.py
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
"""
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
# UHGEval/uhgeval/evaluator/discriminative.py
def scoring(self, data_point: dict) -> dict:
"""Evaluate the keyword hallucination detection ability of the model.
Args:
data_point (dict): A data point in the dataset.
Returns:
dict: A result dictionary.
Note:
True and positive are used to describe hallucinations.
False and negative are used to describe non-hallucinations.
"""
kws = list(data_point['allKeywords'].keys())
appeared_kws = data_point['appearedKeywords']
unappeared_kws = [kw for kw in kws if kw not in appeared_kws]
# Do not consider keywords already appeared in the original text
# Ground truth values
true = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('不合理')]
false = [kw for kw in unappeared_kws if data_point['allKeywords'][kw].startswith('合理')]
num_each_side = min(len(true), len(false)) # Equal number of positive and negative examples
true, false = true[:num_each_side], false[:num_each_side]
# Predicted values
predictions = dict()
for kw in true:
predictions[kw] = (1, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
for kw in false:
predictions[kw] = (0, *self.model.is_kw_hallucinated(kw, data_point, with_reason=True))
# Dictionary format: `{'keyword': (ground_truth, prediction, reason), ...}`
# Get metric values
accuracy, precision, recall, f1 = classifications(
predictions=[item[1] for item in predictions.values()],
references=[item[0] for item in predictions.values()]
)
return {
'metrics': {
'accuracy': accuracy,
'num_kws': num_each_side*2
},
'log': {
'predictions': predictions,
'evaluateDatetime': str(datetime.datetime.now()),
},
'valid': num_each_side > 0 and not any([answer not in {0, 1} for answer in [item[1] for item in predictions.values()]])
}
# UHGEval/uhgeval/.cache/huggingface/bleu/nmt_bleu.py
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
"""
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except Exception as e:
logger.warning(repr(e))
return wrapper
@catch_all_exceptions
def bleu4_score(
continuation: str,
reference: str,
with_penalty = False
) -> float:
import math
from nltk.translate.bleu_score import sentence_bleu
# Tokenize the continuation and reference texts using the custom tokenizer function
continuation_tokens = custom_tokenizer(continuation)
reference_tokens = custom_tokenizer(reference)
# Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
# If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
if with_penalty:
# Calculate the length of the reference and continuation texts
reference_length = len(reference_tokens)
continuation_length = len(continuation_tokens)
# Calculate the brevity penalty factor
if continuation_length > reference_length:
brevity_penalty = 1
else:
brevity_penalty = math.exp(1 - (reference_length / continuation_length))
# Adjust the BLEU score with the brevity penalty
bleu_score = bleu_score * brevity_penalty
return bleu_score
@catch_all_exceptions
def rougeL_score(
continuation: str,
reference: str
) -> float:
f = lambda text: list(jieba.cut(text))
rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
score = results['rougeL']
return score
@catch_all_exceptions
def kw_precision(
continuation: str,
reference: str,
kw_extracter: Callable[[str], list[str]],
with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
"""Measure the rationality of a generated continuation sentence with respect to the original news object."""
kws = kw_extracter(continuation)
if len(kws) == 0:
return 0, [], [] if with_kw_list else 0
appeared_kws = [kw for kw in kws if kw in reference]
precision = len(appeared_kws) / len(kws)
return precision, appeared_kws, kws if with_kw_list else precision
@catch_all_exceptions
def bert_score(
continuation: str,
reference: str
) -> float:
"""
Note:
Requesting the network to connect to Hugging Face.
"""
sim = Similarity()
score = sim.get_score(continuation, reference)
return score
def classifications(
predictions: list[bool],
references: list[bool]
) -> tuple[float, float, float, float]:
<fim_suffix>
true_positive = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 1)
false_positive = sum(1 for a, b in zip(references, predictions) if a == 0 and b == 1)
false_negative = sum(1 for a, b in zip(references, predictions) if a == 1 and b == 0)
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = sum(1 for a, b in zip(references, predictions) if a == b) / len(predictions) if len(predictions) > 0 else 0
return accuracy, precision, recall, f1
<fim_middle>"""
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
""" | """
Calculate accuracy, precision, recall, and F1 in a binary classification problem.
Args:
predictions (list[bool]): List of predicted values (0 or 1).
references (list[bool]): List of true values (0 or 1).
Returns:
tuple: Accuracy, precision, recall, and F1 scores.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/structures/image_list.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/projects/UniRef/uniref/models/segment_anything/utils/transforms.py
def apply_boxes_torch(
self, boxes: torch.Tensor, original_size: Tuple[int, ...]
) -> torch.Tensor:
"""
Expects a torch tensor with shape Bx4. Requires the original image
size in (H, W) format.
"""
boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
return boxes.reshape(-1, 4)
# UniRef/detectron2/structures/rotated_boxes.py
def __getitem__(self, item) -> "RotatedBoxes":
"""
Returns:
RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.
The following usage are allowed:
1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned RotatedBoxes might share storage with this RotatedBoxes,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return RotatedBoxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format(
item
)
return RotatedBoxes(b)
# UniRef/detectron2/utils/events.py
def put_image(self, img_name, img_tensor):
"""
Add an `img_tensor` associated with `img_name`, to be shown on
tensorboard.
Args:
img_name (str): The name of the image to put into tensorboard.
img_tensor (torch.Tensor or numpy.array): An `uint8` or `float`
Tensor of shape `[channel, height, width]` where `channel` is
3. The image format should be RGB. The elements in img_tensor
can either have values in [0, 1] (float32) or [0, 255] (uint8).
The `img_tensor` will be visualized in tensorboard.
"""
self._vis_data.append((img_name, img_tensor, self._iter))
"""
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import division
from typing import Any, List, Tuple
import torch
from torch import device
from torch.nn import functional as F
from detectron2.layers.wrappers import shapes_to_tensor
class ImageList(object):
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size.
The original sizes of each image is stored in `image_sizes`.
Attributes:
image_sizes (list[tuple[int, int]]): each tuple is (h, w).
During tracing, it becomes list[Tensor] instead.
"""
def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):
"""
Arguments:
tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1
image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can
be smaller than (H, W) due to padding.
"""
self.tensor = tensor
self.image_sizes = image_sizes
def __len__(self) -> int:
return len(self.image_sizes)
def __getitem__(self, idx) -> torch.Tensor:
<fim_suffix>
size = self.image_sizes[idx]
return self.tensor[idx, ..., : size[0], : size[1]]
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "ImageList":
cast_tensor = self.tensor.to(*args, **kwargs)
return ImageList(cast_tensor, self.image_sizes)
@property
def device(self) -> device:
return self.tensor.device
@staticmethod
def from_tensors(
tensors: List[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0
) -> "ImageList":
"""
Args:
tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or
(C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded
to the same shape with `pad_value`.
size_divisibility (int): If `size_divisibility > 0`, add padding to ensure
the common height and width is divisible by `size_divisibility`.
This depends on the model and many models need a divisibility of 32.
pad_value (float): value to pad
Returns:
an `ImageList`.
"""
assert len(tensors) > 0
assert isinstance(tensors, (tuple, list))
for t in tensors:
assert isinstance(t, torch.Tensor), type(t)
assert t.shape[:-2] == tensors[0].shape[:-2], t.shape
image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]
image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes]
max_size = torch.stack(image_sizes_tensor).max(0).values
if size_divisibility > 1:
stride = size_divisibility
# the last two dims are H,W, both subject to divisibility requirement
max_size = (max_size + (stride - 1)).div(stride, rounding_mode="floor") * stride
# handle weirdness of scripting and tracing ...
if torch.jit.is_scripting():
max_size: List[int] = max_size.to(dtype=torch.long).tolist()
else:
if torch.jit.is_tracing():
image_sizes = image_sizes_tensor
if len(tensors) == 1:
# This seems slightly (2%) faster.
# TODO: check whether it's faster for multiple images as well
image_size = image_sizes[0]
padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]
batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)
else:
# max_size can be a tensor in tracing mode, therefore convert to list
batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)
batched_imgs = tensors[0].new_full(batch_shape, pad_value)
for img, pad_img in zip(tensors, batched_imgs):
pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img)
return ImageList(batched_imgs.contiguous(), image_sizes)
<fim_middle>"""
Access the individual image in its original size.
Args:
idx: int or slice
Returns:
Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
""" | """
Access the individual image in its original size.
Args:
idx: int or slice
Returns:
Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/solver/build.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/engine/hooks.py
def load_state_dict(self, state_dict):
if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler):
logger = logging.getLogger(__name__)
logger.info("Loading scheduler from state_dict ...")
self.scheduler.load_state_dict(state_dict)
# UniRef/projects/UniRef/uniref/data/coco_dataset_mapper.py
def build_transform_gen_fss(cfg, is_train): # for fss task, low-resolution
"""
Create a list of :class:`TransformGen` from config.
Returns:
list[TransformGen]
"""
if is_train:
min_size = [320, 352, 392, 416, 448, 480, 512, 544, 576, 608, 640]
max_size = 768
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = 480
max_size = 768
sample_style = "choice"
if sample_style == "range":
assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
logger = logging.getLogger(__name__)
tfm_gens = []
if is_train:
tfm_gens.append(T.RandomFlip())
tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
if is_train:
logger.info("TransformGens used in training: " + str(tfm_gens))
return tfm_gens
# UniRef/detectron2/solver/lr_scheduler.py
def __init__(
self,
optimizer: torch.optim.Optimizer,
multiplier: ParamScheduler,
max_iter: int,
last_iter: int = -1,
):
"""
Args:
optimizer, last_iter: See ``torch.optim.lr_scheduler._LRScheduler``.
``last_iter`` is the same as ``last_epoch``.
multiplier: a fvcore ParamScheduler that defines the multiplier on
every LR of the optimizer
max_iter: the total number of training iterations
"""
if not isinstance(multiplier, ParamScheduler):
raise ValueError(
"_LRMultiplier(multiplier=) must be an instance of fvcore "
f"ParamScheduler. Got {multiplier} instead."
)
self._multiplier = multiplier
self._max_iter = max_iter
super().__init__(optimizer, last_epoch=last_iter)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import logging
from collections import defaultdict
from enum import Enum
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union
import torch
from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler
from detectron2.config import CfgNode
from .lr_scheduler import LRMultiplier, WarmupParamScheduler
_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]]
_GradientClipper = Callable[[_GradientClipperInput], None]
class GradientClipType(Enum):
VALUE = "value"
NORM = "norm"
def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper:
"""
Creates gradient clipping closure to clip by value or by norm,
according to the provided config.
"""
cfg = copy.deepcopy(cfg)
def clip_grad_norm(p: _GradientClipperInput):
torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE)
def clip_grad_value(p: _GradientClipperInput):
torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE)
_GRADIENT_CLIP_TYPE_TO_CLIPPER = {
GradientClipType.VALUE: clip_grad_value,
GradientClipType.NORM: clip_grad_norm,
}
return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)]
def _generate_optimizer_class_with_gradient_clipping(
optimizer: Type[torch.optim.Optimizer],
*,
per_param_clipper: Optional[_GradientClipper] = None,
global_clipper: Optional[_GradientClipper] = None,
) -> Type[torch.optim.Optimizer]:
"""
Dynamically creates a new type that inherits the type of a given instance
and overrides the `step` method to add gradient clipping
"""
assert (
per_param_clipper is None or global_clipper is None
), "Not allowed to use both per-parameter clipping and global clipping"
def optimizer_wgc_step(self, closure=None):
if per_param_clipper is not None:
for group in self.param_groups:
for p in group["params"]:
per_param_clipper(p)
else:
# global clipper for future use with detr
# (https://github.com/facebookresearch/detr/pull/287)
all_params = itertools.chain(*[g["params"] for g in self.param_groups])
global_clipper(all_params)
super(type(self), self).step(closure)
OptimizerWithGradientClip = type(
optimizer.__name__ + "WithGradientClip",
(optimizer,),
{"step": optimizer_wgc_step},
)
return OptimizerWithGradientClip
def maybe_add_gradient_clipping(
cfg: CfgNode, optimizer: Type[torch.optim.Optimizer]
) -> Type[torch.optim.Optimizer]:
"""
If gradient clipping is enabled through config options, wraps the existing
optimizer type to become a new dynamically created class OptimizerWithGradientClip
that inherits the given optimizer and overrides the `step` method to
include gradient clipping.
Args:
cfg: CfgNode, configuration options
optimizer: type. A subclass of torch.optim.Optimizer
Return:
type: either the input `optimizer` (if gradient clipping is disabled), or
a subclass of it with gradient clipping included in the `step` method.
"""
if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED:
return optimizer
if isinstance(optimizer, torch.optim.Optimizer):
optimizer_type = type(optimizer)
else:
assert issubclass(optimizer, torch.optim.Optimizer), optimizer
optimizer_type = optimizer
grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS)
OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping(
optimizer_type, per_param_clipper=grad_clipper
)
if isinstance(optimizer, torch.optim.Optimizer):
optimizer.__class__ = OptimizerWithGradientClip # a bit hacky, not recommended
return optimizer
else:
return OptimizerWithGradientClip
def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build an optimizer from config.
"""
params = get_default_optimizer_params(
model,
base_lr=cfg.SOLVER.BASE_LR,
weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR,
weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS,
)
return maybe_add_gradient_clipping(cfg, torch.optim.SGD)(
params,
lr=cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
)
def get_default_optimizer_params(
model: torch.nn.Module,
base_lr: Optional[float] = None,
weight_decay: Optional[float] = None,
weight_decay_norm: Optional[float] = None,
bias_lr_factor: Optional[float] = 1.0,
weight_decay_bias: Optional[float] = None,
overrides: Optional[Dict[str, Dict[str, float]]] = None,
) -> List[Dict[str, Any]]:
"""
Get default param list for optimizer, with support for a few types of
overrides. If no overrides needed, this is equivalent to `model.parameters()`.
Args:
base_lr: lr for every group by default. Can be omitted to use the one in optimizer.
weight_decay: weight decay for every group by default. Can be omitted to use the one
in optimizer.
weight_decay_norm: override weight decay for params in normalization layers
bias_lr_factor: multiplier of lr for bias parameters.
weight_decay_bias: override weight decay for bias parameters
overrides: if not `None`, provides values for optimizer hyperparameters
(LR, weight decay) for module parameters with a given name; e.g.
``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and
weight decay values for all module parameters named `embedding`.
For common detection models, ``weight_decay_norm`` is the only option
needed to be set. ``bias_lr_factor,weight_decay_bias`` are legacy settings
from Detectron1 that are not found useful.
Example:
::
torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0),
lr=0.01, weight_decay=1e-4, momentum=0.9)
"""
if overrides is None:
overrides = {}
defaults = {}
if base_lr is not None:
defaults["lr"] = base_lr
if weight_decay is not None:
defaults["weight_decay"] = weight_decay
bias_overrides = {}
if bias_lr_factor is not None and bias_lr_factor != 1.0:
# NOTE: unlike Detectron v1, we now by default make bias hyperparameters
# exactly the same as regular weights.
if base_lr is None:
raise ValueError("bias_lr_factor requires base_lr")
bias_overrides["lr"] = base_lr * bias_lr_factor
if weight_decay_bias is not None:
bias_overrides["weight_decay"] = weight_decay_bias
if len(bias_overrides):
if "bias" in overrides:
raise ValueError("Conflicting overrides for 'bias'")
overrides["bias"] = bias_overrides
norm_module_types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
# NaiveSyncBatchNorm inherits from BatchNorm2d
torch.nn.GroupNorm,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.LocalResponseNorm,
)
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for module in model.modules():
for module_param_name, value in module.named_parameters(recurse=False):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
hyperparams = copy.copy(defaults)
if isinstance(module, norm_module_types) and weight_decay_norm is not None:
hyperparams["weight_decay"] = weight_decay_norm
hyperparams.update(overrides.get(module_param_name, {}))
params.append({"params": [value], **hyperparams})
return reduce_param_groups(params)
def _expand_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Transform parameter groups into per-parameter structure.
# Later items in `params` can overwrite parameters set in previous items.
ret = defaultdict(dict)
for item in params:
assert "params" in item
cur_params = {x: y for x, y in item.items() if x != "params"}
for param in item["params"]:
ret[param].update({"params": [param], **cur_params})
return list(ret.values())
def reduce_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Reorganize the parameter groups and merge duplicated groups.
# The number of parameter groups needs to be as small as possible in order
# to efficiently use the PyTorch multi-tensor optimizer. Therefore instead
# of using a parameter_group per single parameter, we reorganize the
# parameter groups and merge duplicated groups. This approach speeds
# up multi-tensor optimizer significantly.
params = _expand_param_groups(params)
groups = defaultdict(list) # re-group all parameter groups by their hyperparams
for item in params:
cur_params = tuple((x, y) for x, y in item.items() if x != "params")
groups[cur_params].extend(item["params"])
ret = []
for param_keys, param_values in groups.items():
cur = {kv[0]: kv[1] for kv in param_keys}
cur["params"] = param_values
ret.append(cur)
return ret
def build_lr_scheduler(
cfg: CfgNode, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
<fim_suffix>
name = cfg.SOLVER.LR_SCHEDULER_NAME
if name == "WarmupMultiStepLR":
steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER]
if len(steps) != len(cfg.SOLVER.STEPS):
logger = logging.getLogger(__name__)
logger.warning(
"SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. "
"These values will be ignored."
)
sched = MultiStepParamScheduler(
values=[cfg.SOLVER.GAMMA ** k for k in range(len(steps) + 1)],
milestones=steps,
num_updates=cfg.SOLVER.MAX_ITER,
)
elif name == "WarmupCosineLR":
end_value = cfg.SOLVER.BASE_LR_END / cfg.SOLVER.BASE_LR
assert end_value >= 0.0 and end_value <= 1.0, end_value
sched = CosineParamScheduler(1, end_value)
else:
raise ValueError("Unknown LR scheduler: {}".format(name))
sched = WarmupParamScheduler(
sched,
cfg.SOLVER.WARMUP_FACTOR,
min(cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER, 1.0),
cfg.SOLVER.WARMUP_METHOD,
)
return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER)
<fim_middle>"""
Build a LR scheduler from config.
""" | """
Build a LR scheduler from config.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/hungarian_tracker.py
def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances:
untracked_idx = set(range(len(instances))).difference(set(matched_idx))
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
# UniRef/detectron2/tracking/hungarian_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/tracking/base_tracker.py
def __init__(self, **kwargs):
self._prev_instances = None # (D2)instances for previous frame
self._matched_idx = set() # indices in prev_instances found matching
self._matched_ID = set() # identities in prev_instances found matching
self._untracked_prev_idx = set() # indices in prev_instances not found matching
self._id_count = 0 # used to assign new id
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost
forever
min_box_rel_dim: a percentage; a bbox whose relative dimension is smaller
than this is removed from tracking
min_instance_period: an instance will only be shown after this number of
periods since it first appears in the video
track_iou_threshold: iou threshold, below this number a bbox pair is removed
from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of IoU for all pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
"""
Before each update call, reset fields first
"""
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
<fim_suffix>
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain conditions, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging current instances and instances from previous
frame decided to keep tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
""" | """
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/structures/masks.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/projects/UniRef/uniref/util/box_ops.py
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
Returns a [N, 4] tensors, with the boxes in xyxy format
"""
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float, device=masks.device)
x = torch.arange(0, w, dtype=torch.float, device=masks.device)
y, x = torch.meshgrid(y, x)
x_mask = (masks * x.unsqueeze(0))
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = (masks * y.unsqueeze(0))
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
return torch.stack([x_min, y_min, x_max, y_max], 1)
# UniRef/detectron2/structures/boxes.py
def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M, compute the IoU
(intersection over union) between **all** N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [M]
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
iou = torch.where(
inter > 0,
inter / (area1[:, None] + area2 - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
return iou
# UniRef/detectron2/data/dataset_mapper.py
def __init__(
self,
is_train: bool,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
use_instance_mask: bool = False,
use_keypoint: bool = False,
instance_mask_format: str = "polygon",
keypoint_hflip_indices: Optional[np.ndarray] = None,
precomputed_proposal_topk: Optional[int] = None,
recompute_boxes: bool = False,
):
"""
NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
use_instance_mask: whether to process instance segmentation annotations, if available
use_keypoint: whether to process keypoint annotations if available
instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
masks into this format.
keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
precomputed_proposal_topk: if given, will load pre-computed
proposals from dataset_dict and keep the top k proposals for each image.
recompute_boxes: whether to overwrite bounding box annotations
by computing tight bounding boxes from instance mask annotations.
"""
if recompute_boxes:
assert use_instance_mask, "recompute_boxes requires instance masks"
# fmt: off
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.use_instance_mask = use_instance_mask
self.instance_mask_format = instance_mask_format
self.use_keypoint = use_keypoint
self.keypoint_hflip_indices = keypoint_hflip_indices
self.proposal_topk = precomputed_proposal_topk
self.recompute_boxes = recompute_boxes
# fmt: on
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(np.bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(np.bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However we observe no difference in accuracy,
but BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
<fim_suffix>
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list corresponds to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around polygon masks.
"""
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy
return Boxes(boxes)
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor:
a BoolTensor which represents whether each mask is empty (False) or not (True).
"""
keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
return torch.from_numpy(np.asarray(keep, dtype=np.bool))
def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
"""
Support indexing over the instances and return a `PolygonMasks` object.
`item` can be:
1. An integer. It will return an object with only one instance.
2. A slice. It will return an object with the selected instances.
3. A list[int]. It will return an object with the selected instances,
corresponding to the indices in the list.
4. A vector mask of type BoolTensor, whose length is num_instances.
It will return an object with the instances whose mask is nonzero.
"""
if isinstance(item, int):
selected_polygons = [self.polygons[item]]
elif isinstance(item, slice):
selected_polygons = self.polygons[item]
elif isinstance(item, list):
selected_polygons = [self.polygons[i] for i in item]
elif isinstance(item, torch.Tensor):
# Polygons is a list, so we have to move the indices back to CPU.
if item.dtype == torch.bool:
assert item.dim() == 1, item.shape
item = item.nonzero().squeeze(1).cpu().numpy().tolist()
elif item.dtype in [torch.int32, torch.int64]:
item = item.cpu().numpy().tolist()
else:
raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
selected_polygons = [self.polygons[i] for i in item]
return PolygonMasks(selected_polygons)
def __iter__(self) -> Iterator[List[np.ndarray]]:
"""
Yields:
list[ndarray]: the polygons for one instance.
Each Tensor is a float64 vector representing a polygon.
"""
return iter(self.polygons)
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.polygons))
return s
def __len__(self) -> int:
return len(self.polygons)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each mask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor: A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = boxes.device
# Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
# (several small tensors for representing a single instance mask)
boxes = boxes.to(torch.device("cpu"))
results = [
rasterize_polygons_within_box(poly, box.numpy(), mask_size)
for poly, box in zip(self.polygons, boxes)
]
"""
poly: list[list[float]], the polygons for one instance
box: a tensor of shape (4,)
"""
if len(results) == 0:
return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
return torch.stack(results, dim=0).to(device=device)
def area(self):
"""
Computes area of the mask.
Only works with Polygons, using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Returns:
Tensor: a vector, area for each instance
"""
area = []
for polygons_per_instance in self.polygons:
area_per_instance = 0
for p in polygons_per_instance:
area_per_instance += polygon_area(p[0::2], p[1::2])
area.append(area_per_instance)
return torch.tensor(area)
@staticmethod
def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
"""
Concatenates a list of PolygonMasks into a single PolygonMasks
Arguments:
polymasks_list (list[PolygonMasks])
Returns:
PolygonMasks: the concatenated PolygonMasks
"""
assert isinstance(polymasks_list, (list, tuple))
assert len(polymasks_list) > 0
assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
cat_polymasks = type(polymasks_list[0])(
list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
)
return cat_polymasks
class ROIMasks:
"""
Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
full-image bitmask can be obtained by "pasting" the mask on the region defined
by the corresponding ROI box.
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor: (N, M, M) mask tensor that defines the mask within each ROI.
"""
if tensor.dim() != 3:
raise ValueError("ROIMasks must take a masks of 3 dimension.")
self.tensor = tensor
def to(self, device: torch.device) -> "ROIMasks":
return ROIMasks(self.tensor.to(device))
@property
def device(self) -> device:
return self.tensor.device
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, item) -> "ROIMasks":
"""
Returns:
ROIMasks: Create a new :class:`ROIMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[2:10]`: return a slice of masks.
2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
t = self.tensor[item]
if t.dim() != 3:
raise ValueError(
f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
)
return ROIMasks(t)
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
@torch.jit.unused
def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
"""
Args: see documentation of :func:`paste_masks_in_image`.
"""
from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape
if torch.jit.is_tracing():
if isinstance(height, torch.Tensor):
paste_func = _paste_masks_tensor_shape
else:
paste_func = paste_masks_in_image
else:
paste_func = retry_if_cuda_oom(paste_masks_in_image)
bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold)
return BitMasks(bitmasks)
<fim_middle>"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zero.
""" | """
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zero.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/base_tracker.py
def __init__(self, **kwargs):
self._prev_instances = None # (D2)instances for previous frame
self._matched_idx = set() # indices in prev_instances found matching
self._matched_ID = set() # identities in prev_instances found matching
self._untracked_prev_idx = set() # indices in prev_instances not found matching
self._id_count = 0 # used to assign new id
# UniRef/detectron2/tracking/hungarian_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/evaluation/sem_seg_evaluation.py
def reset(self):
self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64)
self._predictions = []
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost
forever
min_box_rel_dim: a percentage; a bbox whose relative dimension is smaller
than this is removed from tracking
min_instance_period: an instance will only be shown after this number of
periods since it first appears in the video
track_iou_threshold: iou threshold, below this number a bbox pair is removed
from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of IoU for all pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
<fim_suffix>
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain conditions, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging current instances and instances from previous
frame decided to keep tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>"""
Before each update call, reset fields first
""" | """
Before each update call, reset fields first
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/structures/boxes.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/structures/rotated_boxes.py
def __getitem__(self, item) -> "RotatedBoxes":
"""
Returns:
RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.
The following usage are allowed:
1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned RotatedBoxes might share storage with this RotatedBoxes,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return RotatedBoxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format(
item
)
return RotatedBoxes(b)
# UniRef/detectron2/structures/masks.py
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
# UniRef/detectron2/structures/keypoints.py
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints":
"""
Create a new `Keypoints` by indexing on this `Keypoints`.
The following usage are allowed:
1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance.
2. `new_kpts = kpts[2:10]`: return a slice of key points.
3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor
with `length = len(kpts)`. Nonzero elements in the vector will be selected.
Note that the returned Keypoints might share storage with this Keypoints,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return Keypoints([self.tensor[item]])
return Keypoints(self.tensor[item])
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import numpy as np
from enum import IntEnum, unique
from typing import List, Tuple, Union
import torch
from torch import device
_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
@unique
class BoxMode(IntEnum):
"""
Enum of different ways to represent a box.
"""
XYXY_ABS = 0
"""
(x0, y0, x1, y1) in absolute floating points coordinates.
The coordinates in range [0, width or height].
"""
XYWH_ABS = 1
"""
(x0, y0, w, h) in absolute floating points coordinates.
"""
XYXY_REL = 2
"""
Not yet supported!
(x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
"""
XYWH_REL = 3
"""
Not yet supported!
(x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
"""
XYWHA_ABS = 4
"""
(xc, yc, w, h, a) in absolute floating points coordinates.
(xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
"""
@staticmethod
def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType:
"""
Args:
box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
from_mode, to_mode (BoxMode)
Returns:
The converted box of the same type.
"""
if from_mode == to_mode:
return box
original_type = type(box)
is_numpy = isinstance(box, np.ndarray)
single_box = isinstance(box, (list, tuple))
if single_box:
assert len(box) == 4 or len(box) == 5, (
"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
" where k == 4 or 5"
)
arr = torch.tensor(box)[None, :]
else:
# avoid modifying the input box
if is_numpy:
arr = torch.from_numpy(np.asarray(box)).clone()
else:
arr = box.clone()
assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [
BoxMode.XYXY_REL,
BoxMode.XYWH_REL,
], "Relative mode not yet supported!"
if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
assert (
arr.shape[-1] == 5
), "The last dimension of input shape must be 5 for XYWHA format"
original_dtype = arr.dtype
arr = arr.double()
w = arr[:, 2]
h = arr[:, 3]
a = arr[:, 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
new_w = c * w + s * h
new_h = c * h + s * w
# convert center to top-left corner
arr[:, 0] -= new_w / 2.0
arr[:, 1] -= new_h / 2.0
# bottom-right corner
arr[:, 2] = arr[:, 0] + new_w
arr[:, 3] = arr[:, 1] + new_h
arr = arr[:, :4].to(dtype=original_dtype)
elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
original_dtype = arr.dtype
arr = arr.double()
arr[:, 0] += arr[:, 2] / 2.0
arr[:, 1] += arr[:, 3] / 2.0
angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
else:
if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
arr[:, 2] += arr[:, 0]
arr[:, 3] += arr[:, 1]
elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
arr[:, 2] -= arr[:, 0]
arr[:, 3] -= arr[:, 1]
else:
raise NotImplementedError(
"Conversion from BoxMode {} to {} is not supported yet".format(
from_mode, to_mode
)
)
if single_box:
return original_type(arr.flatten().tolist())
if is_numpy:
return arr.numpy()
else:
return arr
class Boxes:
"""
This structure stores a list of boxes as a Nx4 torch.Tensor.
It supports some common methods about boxes
(`area`, `clip`, `nonempty`, etc),
and also behaves like a Tensor
(support indexing, `to(device)`, `.device`, and iteration over all boxes)
Attributes:
tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
# the inputs (and consequently confuses jit)
tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()
self.tensor = tensor
def clone(self) -> "Boxes":
"""
Clone the Boxes.
Returns:
Boxes
"""
return Boxes(self.tensor.clone())
def to(self, device: torch.device):
# Boxes are assumed float32 and do not support to(dtype)
return Boxes(self.tensor.to(device=device))
def area(self) -> torch.Tensor:
"""
Computes the area of all the boxes.
Returns:
torch.Tensor: a vector with areas of each box.
"""
box = self.tensor
area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
return area
def clip(self, box_size: Tuple[int, int]) -> None:
"""
Clip (in place) the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
Args:
box_size (height, width): The clipping box's size.
"""
assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!"
h, w = box_size
x1 = self.tensor[:, 0].clamp(min=0, max=w)
y1 = self.tensor[:, 1].clamp(min=0, max=h)
x2 = self.tensor[:, 2].clamp(min=0, max=w)
y2 = self.tensor[:, 3].clamp(min=0, max=h)
self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)
def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
"""
Find boxes that are non-empty.
A box is considered empty if either of its sides is no larger than threshold.
Returns:
Tensor:
a binary vector which represents whether each box is empty
(False) or non-empty (True).
"""
box = self.tensor
widths = box[:, 2] - box[:, 0]
heights = box[:, 3] - box[:, 1]
keep = (widths > threshold) & (heights > threshold)
return keep
def __getitem__(self, item) -> "Boxes":
<fim_suffix>
if isinstance(item, int):
return Boxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item)
return Boxes(b)
def __len__(self) -> int:
return self.tensor.shape[0]
def __repr__(self) -> str:
return "Boxes(" + str(self.tensor) + ")"
def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
"""
Args:
box_size (height, width): Size of the reference box.
boundary_threshold (int): Boxes that extend beyond the reference box
boundary by more than boundary_threshold are considered "outside".
Returns:
a binary vector, indicating whether each box is inside the reference box.
"""
height, width = box_size
inds_inside = (
(self.tensor[..., 0] >= -boundary_threshold)
& (self.tensor[..., 1] >= -boundary_threshold)
& (self.tensor[..., 2] < width + boundary_threshold)
& (self.tensor[..., 3] < height + boundary_threshold)
)
return inds_inside
def get_centers(self) -> torch.Tensor:
"""
Returns:
The box centers in a Nx2 array of (x, y).
"""
return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2
def scale(self, scale_x: float, scale_y: float) -> None:
"""
Scale the box with horizontal and vertical scaling factors
"""
self.tensor[:, 0::2] *= scale_x
self.tensor[:, 1::2] *= scale_y
@classmethod
def cat(cls, boxes_list: List["Boxes"]) -> "Boxes":
"""
Concatenates a list of Boxes into a single Boxes
Arguments:
boxes_list (list[Boxes])
Returns:
Boxes: the concatenated Boxes
"""
assert isinstance(boxes_list, (list, tuple))
if len(boxes_list) == 0:
return cls(torch.empty(0))
assert all([isinstance(box, Boxes) for box in boxes_list])
# use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
return cat_boxes
@property
def device(self) -> device:
return self.tensor.device
# type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript
# https://github.com/pytorch/pytorch/issues/18627
@torch.jit.unused
def __iter__(self):
"""
Yield a box as a Tensor of shape (4,) at a time.
"""
yield from self.tensor
def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M,
compute the intersection area between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax)
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: intersection, sized [N,M].
"""
boxes1, boxes2 = boxes1.tensor, boxes2.tensor
width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
boxes1[:, None, :2], boxes2[:, :2]
) # [N,M,2]
width_height.clamp_(min=0) # [N,M,2]
intersection = width_height.prod(dim=2) # [N,M]
return intersection
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M, compute the IoU
(intersection over union) between **all** N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [M]
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
iou = torch.where(
inter > 0,
inter / (area1[:, None] + area2 - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
return iou
def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Similar to :func:`pairwise_iou` but computes the IoA (intersection over boxes2 area).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoA, sized [N,M].
"""
area2 = boxes2.area() # [M]
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
ioa = torch.where(
inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device)
)
return ioa
def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes):
"""
Pairwise distance between N points and M boxes. The distance between a
point and a box is represented by the distance from the point to 4 edges
of the box. Distances are all positive when the point is inside the box.
Args:
points: Nx2 coordinates. Each row is (x, y)
boxes: M boxes
Returns:
Tensor: distances of size (N, M, 4). The 4 values are distances from
the point to the left, top, right, bottom of the box.
"""
x, y = points.unsqueeze(dim=2).unbind(dim=1) # (N, 1)
x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2) # (1, M)
return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2)
def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Compute pairwise intersection over union (IOU) of two sets of matched
boxes that have the same number of boxes.
Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix.
Args:
boxes1 (Boxes): bounding boxes, sized [N,4].
boxes2 (Boxes): same length as boxes1
Returns:
Tensor: iou, sized [N].
"""
assert len(boxes1) == len(
boxes2
), "boxlists should have the same" "number of entries, got {}, {}".format(
len(boxes1), len(boxes2)
)
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [N]
box1, box2 = boxes1.tensor, boxes2.tensor
lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2]
rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2]
wh = (rb - lt).clamp(min=0) # [N,2]
inter = wh[:, 0] * wh[:, 1] # [N]
iou = inter / (area1 + area2 - inter) # [N]
return iou
<fim_middle>"""
Args:
item: int, slice, or a BoolTensor
Returns:
Boxes: Create a new :class:`Boxes` by indexing.
The following usage are allowed:
1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned Boxes might share storage with this Boxes,
subject to Pytorch's indexing semantics.
""" | """
Args:
item: int, slice, or a BoolTensor
Returns:
Boxes: Create a new :class:`Boxes` by indexing.
The following usage are allowed:
1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned Boxes might share storage with this Boxes,
subject to Pytorch's indexing semantics.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/structures/instances.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/data/catalog.py
def set(self, **kwargs):
"""
Set multiple metadata with kwargs.
"""
for k, v in kwargs.items():
setattr(self, k, v)
return self
# UniRef/projects/UniRef/uniref/data/ytvis_eval.py
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
It is a list of dict. Each dict corresponds to an image and
contains keys like "height", "width", "file_name", "image_id".
outputs: the outputs of a COCO model. It is a list of dicts with key
"instances" that contains :class:`Instances`.
"""
if self.dataset_name.startswith("bdd"):
prediction = instances_to_coco_json_video_bdd(inputs, outputs)
for k, v in prediction.items():
self._predictions[k].extend(v)
elif self.dataset_name.startswith("ytvis"):
prediction = instances_to_coco_json_video(inputs, outputs)
self._predictions.extend(prediction)
elif self.dataset_name.startswith("refytvos"):
pass
elif self.dataset_name.startswith("refdavis"):
pass
elif self.dataset_name.startswith("ytbvos"):
pass
elif self.dataset_name.startswith("davis"):
pass
elif self.dataset_name.startswith("vos-lvos"):
pass
elif self.dataset_name.startswith("mose"):
pass
else:
raise NotImplementedError
# UniRef/detectron2/utils/events.py
def __init__(self, log_dir: str, window_size: int = 20, **kwargs):
"""
Args:
log_dir (str): the directory to save the output events
window_size (int): the scalars will be median-smoothed by this window size
kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`
"""
self._window_size = window_size
from torch.utils.tensorboard import SummaryWriter
self._writer = SummaryWriter(log_dir, **kwargs)
self._last_write = -1
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
from typing import Any, Dict, List, Tuple, Union
import torch
class Instances:
"""
This class represents a list of instances in an image.
It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields".
All fields must have the same ``__len__`` which is the number of instances.
All other (non-field) attributes of this class are considered private:
they must start with '_' and are not modifiable by a user.
Some basic usage:
1. Set/get/check a field:
.. code-block:: python
instances.gt_boxes = Boxes(...)
print(instances.pred_masks) # a tensor of shape (N, H, W)
print('gt_masks' in instances)
2. ``len(instances)`` returns the number of instances
3. Indexing: ``instances[indices]`` will apply the indexing on all the fields
and returns a new :class:`Instances`.
Typically, ``indices`` is an integer vector of indices,
or a binary mask of length ``num_instances``
.. code-block:: python
category_3_detections = instances[instances.pred_classes == 3]
confident_detections = instances[instances.scores > 0.9]
"""
def __init__(self, image_size: Tuple[int, int], **kwargs: Any):
<fim_suffix>
self._image_size = image_size
self._fields: Dict[str, Any] = {}
for k, v in kwargs.items():
self.set(k, v)
@property
def image_size(self) -> Tuple[int, int]:
"""
Returns:
tuple: height, width
"""
return self._image_size
def __setattr__(self, name: str, val: Any) -> None:
if name.startswith("_"):
super().__setattr__(name, val)
else:
self.set(name, val)
def __getattr__(self, name: str) -> Any:
if name == "_fields" or name not in self._fields:
raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
return self._fields[name]
def set(self, name: str, value: Any) -> None:
"""
Set the field named `name` to `value`.
The length of `value` must be the number of instances,
and must agree with other existing fields in this object.
"""
data_len = len(value)
if len(self._fields):
assert (
len(self) == data_len
), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self))
self._fields[name] = value
def has(self, name: str) -> bool:
"""
Returns:
bool: whether the field called `name` exists.
"""
return name in self._fields
def remove(self, name: str) -> None:
"""
Remove the field called `name`.
"""
del self._fields[name]
def get(self, name: str) -> Any:
"""
Returns the field called `name`.
"""
return self._fields[name]
def get_fields(self) -> Dict[str, Any]:
"""
Returns:
dict: a dict which maps names (str) to data of the fields
Modifying the returned dict will modify this instance.
"""
return self._fields
# Tensor-like methods
def to(self, *args: Any, **kwargs: Any) -> "Instances":
"""
Returns:
Instances: all fields are called with a `to(device)`, if the field has this method.
"""
ret = Instances(self._image_size)
for k, v in self._fields.items():
if hasattr(v, "to"):
v = v.to(*args, **kwargs)
ret.set(k, v)
return ret
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances":
"""
Args:
item: an index-like object and will be used to index all the fields.
Returns:
If `item` is a string, return the data in the corresponding field.
Otherwise, returns an `Instances` where all fields are indexed by `item`.
"""
if type(item) == int:
if item >= len(self) or item < -len(self):
raise IndexError("Instances index out of range!")
else:
item = slice(item, None, len(self))
ret = Instances(self._image_size)
for k, v in self._fields.items():
ret.set(k, v[item])
return ret
def __len__(self) -> int:
for v in self._fields.values():
# use __len__ because len() has to be int and is not friendly to tracing
return v.__len__()
raise NotImplementedError("Empty Instances does not support __len__!")
def __iter__(self):
raise NotImplementedError("`Instances` object is not iterable!")
@staticmethod
def cat(instance_lists: List["Instances"]) -> "Instances":
"""
Args:
instance_lists (list[Instances])
Returns:
Instances
"""
assert all(isinstance(i, Instances) for i in instance_lists)
assert len(instance_lists) > 0
if len(instance_lists) == 1:
return instance_lists[0]
image_size = instance_lists[0].image_size
if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing
for i in instance_lists[1:]:
assert i.image_size == image_size
ret = Instances(image_size)
for k in instance_lists[0]._fields.keys():
values = [i.get(k) for i in instance_lists]
v0 = values[0]
if isinstance(v0, torch.Tensor):
values = torch.cat(values, dim=0)
elif isinstance(v0, list):
values = list(itertools.chain(*values))
elif hasattr(type(v0), "cat"):
values = type(v0).cat(values)
else:
raise ValueError("Unsupported type {} for concatenation".format(type(v0)))
ret.set(k, values)
return ret
def __str__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self))
s += "image_height={}, ".format(self._image_size[0])
s += "image_width={}, ".format(self._image_size[1])
s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items())))
return s
__repr__ = __str__
<fim_middle>"""
Args:
image_size (height, width): the spatial size of the image.
kwargs: fields to add to this `Instances`.
""" | """
Args:
image_size (height, width): the spatial size of the image.
kwargs: fields to add to this `Instances`.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/structures/masks.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/utils/visualizer.py
def overlay_instances(
self,
*,
boxes=None,
labels=None,
masks=None,
keypoints=None,
assigned_colors=None,
alpha=0.5,
):
"""
Args:
boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
or a :class:`RotatedBoxes`,
or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image,
labels (list[str]): the text to be displayed for each instance.
masks (masks-like object): Supported types are:
* :class:`detectron2.structures.PolygonMasks`,
:class:`detectron2.structures.BitMasks`.
* list[list[ndarray]]: contains the segmentation masks for all objects in one image.
The first level of the list corresponds to individual instances. The second
level to all the polygon that compose the instance, and the third level
to the polygon coordinates. The third level should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
* list[ndarray]: each ndarray is a binary mask of shape (H, W).
* list[dict]: each dict is a COCO-style RLE.
keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
where the N is the number of instances and K is the number of keypoints.
The last dimension corresponds to (x, y, visibility or score).
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = 0
if boxes is not None:
boxes = self._convert_boxes(boxes)
num_instances = len(boxes)
if masks is not None:
masks = self._convert_masks(masks)
if num_instances:
assert len(masks) == num_instances
else:
num_instances = len(masks)
if keypoints is not None:
if num_instances:
assert len(keypoints) == num_instances
else:
num_instances = len(keypoints)
keypoints = self._convert_keypoints(keypoints)
if labels is not None:
assert len(labels) == num_instances
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
if boxes is not None and boxes.shape[1] == 5:
return self.overlay_rotated_instances(
boxes=boxes, labels=labels, assigned_colors=assigned_colors
)
# Display in largest to smallest order to reduce occlusion.
areas = None
if boxes is not None:
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
elif masks is not None:
areas = np.asarray([x.area() for x in masks])
if areas is not None:
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs] if boxes is not None else None
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
keypoints = keypoints[sorted_idxs] if keypoints is not None else None
for i in range(num_instances):
color = assigned_colors[i]
if boxes is not None:
self.draw_box(boxes[i], edge_color=color)
if masks is not None:
for segment in masks[i].polygons:
self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
if labels is not None:
# first get a box
if boxes is not None:
x0, y0, x1, y1 = boxes[i]
text_pos = (x0, y0) # if drawing boxes, put text on the box corner.
horiz_align = "left"
elif masks is not None:
# skip small mask without polygon
if len(masks[i].polygons) == 0:
continue
x0, y0, x1, y1 = masks[i].bbox()
# draw text in the center (defined by median) when box is not drawn
# median is less sensitive to outliers.
text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
horiz_align = "center"
else:
continue # drawing the box confidence for keypoints isn't very useful.
# for small objects, draw text at the side to avoid occlusion
instance_area = (y1 - y0) * (x1 - x0)
if (
instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
or y1 - y0 < 40 * self.output.scale
):
if y1 >= self.output.height - 5:
text_pos = (x1, y0)
else:
text_pos = (x0, y1)
height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
font_size = (
np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
* 0.5
* self._default_font_size
)
self.draw_text(
labels[i],
text_pos,
color=lighter_color,
horizontal_alignment=horiz_align,
font_size=font_size,
)
# draw keypoints
if keypoints is not None:
for keypoints_per_instance in keypoints:
self.draw_and_connect_keypoints(keypoints_per_instance)
return self.output
# UniRef/detectron2/utils/visualizer.py
def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
"""
Args:
img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
the height and width of the image respectively. C is the number of
color channels. The image is required to be in RGB format since that
is a requirement of the Matplotlib library. The image is also expected
to be in the range [0, 255].
metadata (Metadata): dataset metadata (e.g. class names and colors)
instance_mode (ColorMode): defines one of the pre-defined style for drawing
instances on an image.
"""
self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
if metadata is None:
metadata = MetadataCatalog.get("__nonexist__")
self.metadata = metadata
self.output = VisImage(self.img, scale=scale)
self.cpu_device = torch.device("cpu")
# too small texts are useless, therefore clamp to 9
self._default_font_size = max(
np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
)
self._instance_mode = instance_mode
self.keypoint_threshold = _KEYPOINT_THRESHOLD
# UniRef/detectron2/layers/mask_ops.py
def paste_masks_in_image(
masks: torch.Tensor, boxes: torch.Tensor, image_shape: Tuple[int, int], threshold: float = 0.5
):
"""
Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image.
The location, height, and width for pasting each mask is determined by their
corresponding bounding boxes in boxes.
Note:
This is a complicated but more accurate implementation. In actual deployment, it is
often enough to use a faster but less accurate implementation.
See :func:`paste_mask_in_image_old` in this file for an alternative implementation.
Args:
masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of
detected object instances in the image and Hmask, Wmask are the mask width and mask
height of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1].
boxes (Boxes or Tensor): A Boxes of length Bimg or Tensor of shape (Bimg, 4).
boxes[i] and masks[i] correspond to the same object instance.
image_shape (tuple): height, width
threshold (float): A threshold in [0, 1] for converting the (soft) masks to
binary masks.
Returns:
img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the
number of detected object instances and Himage, Wimage are the image width
and height. img_masks[i] is a binary mask for object instance i.
"""
assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported"
N = len(masks)
if N == 0:
return masks.new_empty((0,) + image_shape, dtype=torch.uint8)
if not isinstance(boxes, torch.Tensor):
boxes = boxes.tensor
device = boxes.device
assert len(boxes) == N, boxes.shape
img_h, img_w = image_shape
# The actual implementation split the input into chunks,
# and paste them chunk by chunk.
if device.type == "cpu" or torch.jit.is_scripting():
# CPU is most efficient when they are pasted one by one with skip_empty=True
# so that it performs minimal number of operations.
num_chunks = N
else:
# GPU benefits from parallelism for larger chunks, but may have memory issue
# int(img_h) because shape may be tensors in tracing
num_chunks = int(np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
assert (
num_chunks <= N
), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it"
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
img_masks = torch.zeros(
N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8
)
for inds in chunks:
masks_chunk, spatial_inds = _do_paste_mask(
masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu"
)
if threshold >= 0:
masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
else:
# for visualization and debugging
masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
if torch.jit.is_scripting(): # Scripting does not use the optimized codepath
img_masks[inds] = masks_chunk
else:
img_masks[(inds,) + spatial_inds] = masks_chunk
return img_masks
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(np.bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(np.bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However we observe no difference in accuracy,
but BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zero.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
<fim_suffix>
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around polygon masks.
"""
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy
return Boxes(boxes)
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor:
a BoolTensor which represents whether each mask is empty (False) or not (True).
"""
keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
return torch.from_numpy(np.asarray(keep, dtype=np.bool))
def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
"""
Support indexing over the instances and return a `PolygonMasks` object.
`item` can be:
1. An integer. It will return an object with only one instance.
2. A slice. It will return an object with the selected instances.
3. A list[int]. It will return an object with the selected instances,
corresponding to the indices in the list.
4. A vector mask of type BoolTensor, whose length is num_instances.
It will return an object with the instances whose mask is nonzero.
"""
if isinstance(item, int):
selected_polygons = [self.polygons[item]]
elif isinstance(item, slice):
selected_polygons = self.polygons[item]
elif isinstance(item, list):
selected_polygons = [self.polygons[i] for i in item]
elif isinstance(item, torch.Tensor):
# Polygons is a list, so we have to move the indices back to CPU.
if item.dtype == torch.bool:
assert item.dim() == 1, item.shape
item = item.nonzero().squeeze(1).cpu().numpy().tolist()
elif item.dtype in [torch.int32, torch.int64]:
item = item.cpu().numpy().tolist()
else:
raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
selected_polygons = [self.polygons[i] for i in item]
return PolygonMasks(selected_polygons)
def __iter__(self) -> Iterator[List[np.ndarray]]:
"""
Yields:
list[ndarray]: the polygons for one instance.
Each Tensor is a float64 vector representing a polygon.
"""
return iter(self.polygons)
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.polygons))
return s
def __len__(self) -> int:
return len(self.polygons)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each mask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor: A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = boxes.device
# Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
# (several small tensors for representing a single instance mask)
boxes = boxes.to(torch.device("cpu"))
results = [
rasterize_polygons_within_box(poly, box.numpy(), mask_size)
for poly, box in zip(self.polygons, boxes)
]
"""
poly: list[list[float]], the polygons for one instance
box: a tensor of shape (4,)
"""
if len(results) == 0:
return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
return torch.stack(results, dim=0).to(device=device)
def area(self):
"""
Computes area of the mask.
Only works with Polygons, using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Returns:
Tensor: a vector, area for each instance
"""
area = []
for polygons_per_instance in self.polygons:
area_per_instance = 0
for p in polygons_per_instance:
area_per_instance += polygon_area(p[0::2], p[1::2])
area.append(area_per_instance)
return torch.tensor(area)
@staticmethod
def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
"""
Concatenates a list of PolygonMasks into a single PolygonMasks
Arguments:
polymasks_list (list[PolygonMasks])
Returns:
PolygonMasks: the concatenated PolygonMasks
"""
assert isinstance(polymasks_list, (list, tuple))
assert len(polymasks_list) > 0
assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
cat_polymasks = type(polymasks_list[0])(
list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
)
return cat_polymasks
class ROIMasks:
"""
Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
full-image bitmask can be obtained by "pasting" the mask on the region defined
by the corresponding ROI box.
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor: (N, M, M) mask tensor that defines the mask within each ROI.
"""
if tensor.dim() != 3:
raise ValueError("ROIMasks must take a masks of 3 dimension.")
self.tensor = tensor
def to(self, device: torch.device) -> "ROIMasks":
return ROIMasks(self.tensor.to(device))
@property
def device(self) -> device:
return self.tensor.device
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, item) -> "ROIMasks":
"""
Returns:
ROIMasks: Create a new :class:`ROIMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[2:10]`: return a slice of masks.
2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
t = self.tensor[item]
if t.dim() != 3:
raise ValueError(
f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
)
return ROIMasks(t)
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
@torch.jit.unused
def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
"""
Args: see documentation of :func:`paste_masks_in_image`.
"""
from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape
if torch.jit.is_tracing():
if isinstance(height, torch.Tensor):
paste_func = _paste_masks_tensor_shape
else:
paste_func = paste_masks_in_image
else:
paste_func = retry_if_cuda_oom(paste_masks_in_image)
bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold)
return BitMasks(bitmasks)
<fim_middle>"""
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list correspond to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
""" | """
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list correspond to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/config/lazy.py
def __call__(self, **kwargs):
if is_dataclass(self._target):
# omegaconf object cannot hold dataclass type
# https://github.com/omry/omegaconf/issues/784
target = _convert_target_to_string(self._target)
else:
target = self._target
kwargs["_target_"] = target
return DictConfig(content=kwargs, flags={"allow_objects": True})
# UniRef/detectron2/config/lazy.py
def __init__(self, target):
if not (callable(target) or isinstance(target, (str, abc.Mapping))):
raise TypeError(
f"target of LazyCall must be a callable or defines a callable! Got {target}"
)
self._target = target
# UniRef/detectron2/engine/launch.py
def _distributed_worker(
local_rank,
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout=DEFAULT_TIMEOUT,
):
assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
global_rank = machine_rank * num_gpus_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
timeout=timeout,
)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error("Process group URL: {}".format(dist_url))
raise e
# Setup the local process group (which contains ranks within the same machine)
assert comm._LOCAL_PROCESS_GROUP is None
num_machines = world_size // num_gpus_per_machine
for i in range(num_machines):
ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
pg = dist.new_group(ranks_on_i)
if i == machine_rank:
comm._LOCAL_PROCESS_GROUP = pg
assert num_gpus_per_machine <= torch.cuda.device_count()
torch.cuda.set_device(local_rank)
# synchronize is needed here to prevent a possible timeout after calling init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
main_func(*args)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
<fim_suffix>
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
return cfg # return as-is if don't know what to do
<fim_middle>"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
""" | """
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/hungarian_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/tracking/base_tracker.py
def __init__(self, **kwargs):
self._prev_instances = None # (D2)instances for previous frame
self._matched_idx = set() # indices in prev_instances found matching
self._matched_ID = set() # identities in prev_instances found matching
self._untracked_prev_idx = set() # indices in prev_instances not found matching
self._id_count = 0 # used to assign new id
# UniRef/detectron2/tracking/hungarian_tracker.py
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost forever
min_box_rel_dim: a relative dimension; a bbox smaller than this fraction
of the frame is removed from tracking
min_instance_period: an instance is shown only after being tracked for this
number of periods since it first appears in the video
track_iou_threshold: iou threshold, below this number a bbox pair is removed
from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of IoU for all pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
<fim_suffix>
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
"""
Before each update call, reset fields first
"""
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain conditions, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging current instances and instances from previous
frame decided to keep tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
""" | """
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/hungarian_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/tracking/base_tracker.py
def __init__(self, **kwargs):
self._prev_instances = None # (D2)instances for previous frame
self._matched_idx = set() # indices in prev_instances found matching
self._matched_ID = set() # identities in prev_instances found matching
self._untracked_prev_idx = set() # indices in prev_instances not found matching
self._id_count = 0 # used to assign new id
# UniRef/detectron2/tracking/hungarian_tracker.py
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost forever
min_box_rel_dim: a relative dimension; a bbox smaller than this fraction
of the frame is removed from tracking
min_instance_period: an instance is shown only after being tracked for this
number of periods since it first appears in the video
track_iou_threshold: iou threshold, below this number a bbox pair is removed
from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of IoU for all pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
<fim_suffix>
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
"""
Before each update call, reset fields first
"""
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain condition, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging current instances and instances from previous
frame decided to keep tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
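# --- Illustrative sketch (added for clarity, not part of the original file): how the
# IoU threshold used above decides whether a current detection inherits a previous ID.
# The helper name `_demo_track_iou_threshold` and the 0.5 threshold are assumptions of
# this sketch, not values read from the tracker config.
def _demo_track_iou_threshold():
    import torch
    from detectron2.structures import Boxes, pairwise_iou
    prev = Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0]]))
    curr = Boxes(torch.tensor([[1.0, 1.0, 11.0, 11.0]]))
    iou = pairwise_iou(curr, prev)[0, 0]  # roughly 0.68 for this pair of boxes
    # with a track_iou_threshold of 0.5, this detection would keep the previous ID
    return iou > 0.5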
<fim_middle>self._id_count += len(instances) | self._id_count += len(instances) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/external/lvos-evaluation/lvos/metrics.py
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
# UniRef/external/lvos-evaluation/lvos/metrics.py
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
    Compute mean, recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F
# UniRef/detectron2/layers/rotated_boxes.py
def pairwise_iou_rotated(boxes1, boxes2):
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in
(x_center, y_center, width, height, angle) format.
Arguments:
boxes1 (Tensor[N, 5])
boxes2 (Tensor[M, 5])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
return torch.ops.detectron2.box_iou_rotated(boxes1, boxes2)
"""
import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
<fim_suffix>
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
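# --- Illustrative sketch (added for clarity, not part of the original file): a quick
# sanity check of db_eval_iou on tiny masks. The helper name `_demo_db_eval_iou` is an
# assumption of this sketch.
def _demo_db_eval_iou():
    gt = np.zeros((4, 4), dtype=bool)
    gt[:2, :2] = True       # 4 foreground pixels
    pred = np.zeros((4, 4), dtype=bool)
    pred[:2, :1] = True     # 2 foreground pixels, all inside the ground truth
    # intersection = 2 and union = 4, so the Jaccard index should come out as 0.5
    return db_eval_iou(gt, pred)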
def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008):
assert annotation.shape == segmentation.shape
if void_pixels is not None:
assert annotation.shape == void_pixels.shape
if annotation.ndim == 3:
n_frames = annotation.shape[0]
f_res = np.zeros(n_frames)
for frame_id in range(n_frames):
void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ]
f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th)
elif annotation.ndim == 2:
f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th)
else:
raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions')
return f_res
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
    Compute mean, recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
from skimage.morphology import disk
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F
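# --- Illustrative sketch (added for clarity, not part of the original file): the final
# harmonic-mean step above with concrete numbers; names are local to this sketch.
def _demo_f_measure_formula():
    precision, recall = 0.8, 0.5
    # F = 2 * 0.8 * 0.5 / (0.8 + 0.5), which is roughly 0.615
    return 2 * precision * recall / (precision + recall)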
def _seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <dmartin@eecs.berkeley.edu>
January 2003
"""
seg = seg.astype(np.bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
    assert not (
        width > w or height > h or abs(ar1 - ar2) > 0.01
    ), "Can't convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
for y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1
return bmap
if __name__ == '__main__':
from davis2017.davis import DAVIS
from davis2017.results import Results
dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics')
results = Results(root_dir='examples/osvos')
# Test timing F measure
for seq in dataset.get_sequences():
all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True)
all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
all_res_masks = results.read_masks(seq, all_masks_id)
f_metrics_res = np.zeros(all_gt_masks.shape[:2])
for ii in range(all_gt_masks.shape[0]):
f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...])
# Run using to profile code: python -m cProfile -o f_measure.prof metrics.py
# snakeviz f_measure.prof
<fim_middle>inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1)) | inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1)) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/layers/losses.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/utils/events.py
def put_image(self, img_name, img_tensor):
"""
Add an `img_tensor` associated with `img_name`, to be shown on
tensorboard.
Args:
img_name (str): The name of the image to put into tensorboard.
img_tensor (torch.Tensor or numpy.array): An `uint8` or `float`
Tensor of shape `[channel, height, width]` where `channel` is
3. The image format should be RGB. The elements in img_tensor
can either have values in [0, 1] (float32) or [0, 255] (uint8).
The `img_tensor` will be visualized in tensorboard.
"""
self._vis_data.append((img_name, img_tensor, self._iter))
# UniRef/detectron2/export/shared.py
def create_const_fill_op(
name: str,
blob: Union[np.ndarray, workspace.Int8Tensor],
device_option: Optional[caffe2_pb2.DeviceOption] = None,
) -> caffe2_pb2.OperatorDef:
"""
Given a blob object, return the Caffe2 operator that creates this blob
as constant. Currently support NumPy tensor and Caffe2 Int8Tensor.
"""
tensor_type = type(blob)
assert tensor_type in [
np.ndarray,
workspace.Int8Tensor,
], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format(
name, type(blob)
)
if tensor_type == np.ndarray:
return _create_const_fill_op_from_numpy(name, blob, device_option)
elif tensor_type == workspace.Int8Tensor:
assert device_option is None
return _create_const_fill_op_from_c2_int8_tensor(name, blob)
# UniRef/detectron2/modeling/anchor_generator.py
def forward(self, features):
"""
Args:
features (list[Tensor]): list of backbone feature maps on which to generate anchors.
Returns:
list[RotatedBoxes]: a list of Boxes containing all the anchors for each feature map
(i.e. the cell anchors repeated over all locations in the feature map).
The number of anchors of each feature map is Hi x Wi x num_cell_anchors,
where Hi, Wi are resolution of the feature map divided by anchor stride.
"""
grid_sizes = [feature_map.shape[-2:] for feature_map in features]
anchors_over_all_feature_maps = self._grid_anchors(grid_sizes)
return [RotatedBoxes(x) for x in anchors_over_all_feature_maps]
"""
import math
import torch
def diou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
    Distance Intersection over Union Loss (Zhaohui Zheng et al.)
https://arxiv.org/abs/1911.08287
Args:
boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
eps (float): small number to prevent division by zero
"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# TODO: use torch._assert_async() when pytorch 1.8 support is dropped
assert (x2 >= x1).all(), "bad box: x1 larger than x2"
assert (y2 >= y1).all(), "bad box: y1 larger than y2"
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsct = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
iou = intsct / union
# smallest enclosing box
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
x_g = (x1g + x2g) / 2
y_g = (y1g + y2g) / 2
distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
# Eqn. (7)
loss = 1 - iou + (distance / diag_len)
if reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
return loss
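# --- Illustrative sketch (added for clarity, not part of the original file): minimal
# checks of diou_loss behaviour. The helper name `_demo_diou_loss` is an assumption of
# this sketch.
def _demo_diou_loss():
    b1 = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
    # identical boxes: IoU is ~1 and the center distance is 0, so the loss is ~0
    same = diou_loss(b1, b1, reduction="mean")
    b2 = torch.tensor([[20.0, 20.0, 30.0, 30.0]])
    # disjoint boxes: IoU is 0 and the center-distance penalty pushes the loss above 1
    apart = diou_loss(b1, b2, reduction="mean")
    return same, apart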
def ciou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
    Complete Intersection over Union Loss (Zhaohui Zheng et al.)
https://arxiv.org/abs/1911.08287
Args:
boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
eps (float): small number to prevent division by zero
"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# TODO: use torch._assert_async() when pytorch 1.8 support is dropped
assert (x2 >= x1).all(), "bad box: x1 larger than x2"
assert (y2 >= y1).all(), "bad box: y1 larger than y2"
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsct = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
<fim_suffix>
union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
iou = intsct / union
# smallest enclosing box
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
x_g = (x1g + x2g) / 2
y_g = (y1g + y2g) / 2
distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
# width and height of boxes
w_pred = x2 - x1
h_pred = y2 - y1
w_gt = x2g - x1g
h_gt = y2g - y1g
v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
with torch.no_grad():
alpha = v / (1 - iou + v + eps)
# Eqn. (10)
loss = 1 - iou + (distance / diag_len) + alpha * v
if reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
return loss
<fim_middle>intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) | intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/layers/losses.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/utils/events.py
def put_image(self, img_name, img_tensor):
"""
Add an `img_tensor` associated with `img_name`, to be shown on
tensorboard.
Args:
img_name (str): The name of the image to put into tensorboard.
img_tensor (torch.Tensor or numpy.array): An `uint8` or `float`
Tensor of shape `[channel, height, width]` where `channel` is
3. The image format should be RGB. The elements in img_tensor
can either have values in [0, 1] (float32) or [0, 255] (uint8).
The `img_tensor` will be visualized in tensorboard.
"""
self._vis_data.append((img_name, img_tensor, self._iter))
# UniRef/detectron2/export/shared.py
def create_const_fill_op(
name: str,
blob: Union[np.ndarray, workspace.Int8Tensor],
device_option: Optional[caffe2_pb2.DeviceOption] = None,
) -> caffe2_pb2.OperatorDef:
"""
Given a blob object, return the Caffe2 operator that creates this blob
as constant. Currently support NumPy tensor and Caffe2 Int8Tensor.
"""
tensor_type = type(blob)
assert tensor_type in [
np.ndarray,
workspace.Int8Tensor,
], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format(
name, type(blob)
)
if tensor_type == np.ndarray:
return _create_const_fill_op_from_numpy(name, blob, device_option)
elif tensor_type == workspace.Int8Tensor:
assert device_option is None
return _create_const_fill_op_from_c2_int8_tensor(name, blob)
# UniRef/detectron2/modeling/anchor_generator.py
def forward(self, features):
"""
Args:
features (list[Tensor]): list of backbone feature maps on which to generate anchors.
Returns:
list[RotatedBoxes]: a list of Boxes containing all the anchors for each feature map
(i.e. the cell anchors repeated over all locations in the feature map).
The number of anchors of each feature map is Hi x Wi x num_cell_anchors,
where Hi, Wi are resolution of the feature map divided by anchor stride.
"""
grid_sizes = [feature_map.shape[-2:] for feature_map in features]
anchors_over_all_feature_maps = self._grid_anchors(grid_sizes)
return [RotatedBoxes(x) for x in anchors_over_all_feature_maps]
"""
import math
import torch
def diou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
    Distance Intersection over Union Loss (Zhaohui Zheng et al.)
https://arxiv.org/abs/1911.08287
Args:
boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
eps (float): small number to prevent division by zero
"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# TODO: use torch._assert_async() when pytorch 1.8 support is dropped
assert (x2 >= x1).all(), "bad box: x1 larger than x2"
assert (y2 >= y1).all(), "bad box: y1 larger than y2"
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsct = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
iou = intsct / union
# smallest enclosing box
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
x_g = (x1g + x2g) / 2
y_g = (y1g + y2g) / 2
distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
# Eqn. (7)
loss = 1 - iou + (distance / diag_len)
if reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
return loss
def ciou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
    Complete Intersection over Union Loss (Zhaohui Zheng et al.)
https://arxiv.org/abs/1911.08287
Args:
boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
eps (float): small number to prevent division by zero
"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# TODO: use torch._assert_async() when pytorch 1.8 support is dropped
assert (x2 >= x1).all(), "bad box: x1 larger than x2"
assert (y2 >= y1).all(), "bad box: y1 larger than y2"
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsct = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
iou = intsct / union
# smallest enclosing box
<fim_suffix>
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
x_g = (x1g + x2g) / 2
y_g = (y1g + y2g) / 2
distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
# width and height of boxes
w_pred = x2 - x1
h_pred = y2 - y1
w_gt = x2g - x1g
h_gt = y2g - y1g
v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
with torch.no_grad():
alpha = v / (1 - iou + v + eps)
# Eqn. (10)
loss = 1 - iou + (distance / diag_len) + alpha * v
if reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
return loss
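# --- Illustrative sketch (added for clarity, not part of the original file): the
# CIoU-specific aspect-ratio term above, written out for one concrete pair of shapes.
# The helper name `_demo_ciou_aspect_term` is an assumption of this sketch.
def _demo_ciou_aspect_term():
    # a square prediction (w = h = 10) against a 2:1 ground-truth box (w = 20, h = 10):
    # v = 4 / pi^2 * (atan(20 / 10) - atan(10 / 10))^2, roughly 0.042
    return (4 / (math.pi ** 2)) * (math.atan(20 / 10) - math.atan(10 / 10)) ** 2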
<fim_middle>xc1 = torch.min(x1, x1g) | xc1 = torch.min(x1, x1g) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain condition, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging current instances and instances from previous
frame decided to keep tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/utils/testing.py
def convert_scripted_instances(instances):
"""
Convert a scripted Instances object to a regular :class:`Instances` object
"""
assert hasattr(
instances, "image_size"
), f"Expect an Instances object, but got {type(instances)}!"
ret = Instances(instances.image_size)
for name in instances._field_names:
val = getattr(instances, "_" + name, None)
if val is not None:
ret.set(name, val)
return ret
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
import numpy as np
import torch
from detectron2.structures import Boxes, Instances
from .base_tracker import BaseTracker
from scipy.optimize import linear_sum_assignment
from ..config.config import CfgNode as CfgNode_
from typing import Dict
from detectron2.config import configurable
class BaseHungarianTracker(BaseTracker):
"""
A base class for all Hungarian trackers
"""
@configurable
def __init__(
self,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
**kwargs
):
"""
Args:
            video_height: height of the video frame
video_width: width of the video frame
            max_num_instances: maximum number of IDs allowed to be tracked
            max_lost_frame_count: maximum number of frames an ID can lose tracking;
                                  beyond this number, an ID is considered lost forever
            min_box_rel_dim: a relative dimension (fraction of the frame size); a bbox
                             smaller than this is removed from tracking
            min_instance_period: an instance is only shown after being tracked for this
                                 number of frames since it first appears in the video
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
@classmethod
def from_config(cls, cfg: CfgNode_) -> Dict:
raise NotImplementedError("Calling HungarianTracker::from_config")
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
raise NotImplementedError("Calling HungarianTracker::build_matrix")
def update(self, instances: Instances) -> Instances:
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
self._untracked_prev_idx = set(range(len(self._prev_instances)))
cost_matrix = self.build_cost_matrix(instances, self._prev_instances)
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx)
instances = self._process_unmatched_idx(instances, matched_idx)
instances = self._process_unmatched_prev_idx(instances, matched_prev_idx)
self._prev_instances = copy.deepcopy(instances)
return instances
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _process_matched_idx(
self,
instances: Instances,
matched_idx: np.ndarray,
matched_prev_idx: np.ndarray
) -> Instances:
assert matched_idx.size == matched_prev_idx.size
for i in range(matched_idx.size):
instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]]
instances.ID_period[matched_idx[i]] = \
self._prev_instances.ID_period[matched_prev_idx[i]] + 1
instances.lost_frame_count[matched_idx[i]] = 0
return instances
def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances:
untracked_idx = set(range(len(instances))).difference(set(matched_idx))
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
<fim_suffix>
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
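# --- Illustrative sketch (added for clarity, not part of the original file): how
# linear_sum_assignment, used in update() above, pairs current detections (rows) with
# previous instances (columns) on a tiny cost matrix. The helper name is an assumption
# of this sketch.
def _demo_hungarian_matching():
    cost = np.array([[0.1, 0.9],
                     [0.8, 0.2]])
    rows, cols = linear_sum_assignment(cost)
    # rows == [0, 1] and cols == [0, 1]: each detection is paired with the previous
    # instance of lower cost, for a total assignment cost of 0.3
    return rows, cols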
<fim_middle>untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) | untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/projects/UniRef/uniref/models/segment_anything/modeling/transformer.py
def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
b, n, c = x.shape
x = x.reshape(b, n, num_heads, c // num_heads)
return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
# UniRef/tools/plain_train_net.py
def do_test(cfg, model):
results = OrderedDict()
for dataset_name in cfg.DATASETS.TEST:
data_loader = build_detection_test_loader(cfg, dataset_name)
evaluator = get_evaluator(
cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
)
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
# UniRef/detectron2/utils/video_visualizer.py
def _assign_colors_by_id(self, instances: Instances) -> List:
colors = []
untracked_ids = set(self._assigned_colors.keys())
for id in instances.ID:
if id in self._assigned_colors:
colors.append(self._color_pool[self._assigned_colors[id]])
untracked_ids.remove(id)
else:
assert len(self._color_idx_set) >= 1, f"Number of id exceeded maximum, \
max = {self._max_num_instances}"
idx = self._color_idx_set.pop()
color = self._color_pool[idx]
self._assigned_colors[id] = idx
colors.append(color)
for id in untracked_ids:
self._color_idx_set.add(self._assigned_colors[id])
del self._assigned_colors[id]
return colors
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
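# --- Illustrative sketch (added for clarity, not part of the original file): what the
# renaming above does to two typical C2 backbone blob names, as I read the substitutions.
def _demo_convert_basic_c2_names():
    renamed = convert_basic_c2_names(["conv1_w", "res2_0_branch2a_bn_s"])
    # expected: ["stem.conv1.weight", "res2.0.conv1.norm.weight"]
    return renamed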
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note the current matching is not symmetric.
# it assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
    Match names between the two state dicts, and return a new ckpt_state_dict with names
converted to match model_state_dict with heuristics. The returned dict can be later
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
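# --- Illustrative sketch (added for clarity, not part of the original file): the
# suffix-matching rule the function above relies on, restated in isolation.
# `_suffix_match` is a stand-in for the nested `match` helper, not an exported name.
def _suffix_match(model_key: str, ckpt_key: str) -> bool:
    # a checkpoint key matches a model key only at a "." boundary
    return model_key == ckpt_key or model_key.endswith("." + ckpt_key)
# e.g. _suffix_match("backbone.bottom_up.res2.conv1.weight", "res2.conv1.weight") is True,
# while _suffix_match("roi_heads.whatever_conv1", "conv1") is False.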
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
<fim_suffix>
ret = [a for a, b in zip(m1, m2) if a == b]
ret = ".".join(ret) + "." if len(ret) else ""
return ret
def _longest_common_prefix_str(names: List[str]) -> str:
m1, m2 = min(names), max(names)
lcp = [a for a, b in zip(m1, m2) if a == b]
lcp = "".join(lcp)
return lcp
def _group_str(names: List[str]) -> str:
"""
Turn "common1", "common2", "common3" into "common{1,2,3}"
"""
lcp = _longest_common_prefix_str(names)
rest = [x[len(lcp) :] for x in names]
rest = "{" + ",".join(rest) + "}"
ret = lcp + rest
# add some simplification for BN specifically
ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
return ret
<fim_middle>m1, m2 = min(names), max(names) | m1, m2 = min(names), max(names) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/structures/masks.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/utils/visualizer.py
def polygons_to_mask(self, polygons):
rle = mask_util.frPyObjects(polygons, self.height, self.width)
rle = mask_util.merge(rle)
return mask_util.decode(rle)[:, :]
# UniRef/detectron2/utils/visualizer.py
def bbox(self):
p = mask_util.frPyObjects(self.polygons, self.height, self.width)
p = mask_util.merge(p)
bbox = mask_util.toBbox(p)
bbox[2] += bbox[0]
bbox[3] += bbox[1]
return bbox
# UniRef/detectron2/utils/colormap.py
def colormap(rgb=False, maximum=255):
"""
Args:
rgb (bool): whether to return RGB colors or BGR colors.
maximum (int): either 255 or 1
Returns:
ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1]
"""
assert maximum in [255, 1], maximum
c = _COLORS * maximum
if not rgb:
c = c[:, ::-1]
return c
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
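# --- Illustrative sketch (added for clarity, not part of the original file): the
# shoelace formula above applied to a unit square. The helper name is an assumption of
# this sketch.
def _demo_polygon_area():
    x = np.array([0.0, 1.0, 1.0, 0.0])
    y = np.array([0.0, 0.0, 1.0, 1.0])
    # the four vertices trace a unit square, so the enclosed area is 1.0
    return polygon_area(x, y)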
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(np.bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
<fim_suffix>
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However, we observe no difference in accuracy, though BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zero.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
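# --- Illustrative sketch (added for clarity, not in the original file): minimal
# BitMasks usage on hypothetical data, relying on the torch import of this module.
def _example_bitmasks_usage():
    # Two 16x16 instance masks: the first empty, the second a filled 4x4 square.
    masks = torch.zeros(2, 16, 16, dtype=torch.bool)
    masks[1, 4:8, 4:8] = True
    bitmasks = BitMasks(masks)
    keep = bitmasks.nonempty()             # tensor([False, True])
    boxes = bitmasks.get_bounding_boxes()  # second row is [4., 4., 8., 8.]
    return keep, boxes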
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list corresponds to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around polygon masks.
"""
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy
return Boxes(boxes)
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor:
a BoolTensor which represents whether each mask is empty (False) or not (True).
"""
keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
return torch.from_numpy(np.asarray(keep, dtype=np.bool))
def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
"""
Support indexing over the instances and return a `PolygonMasks` object.
`item` can be:
1. An integer. It will return an object with only one instance.
2. A slice. It will return an object with the selected instances.
3. A list[int]. It will return an object with the selected instances,
corresponding to the indices in the list.
4. A vector mask of type BoolTensor, whose length is num_instances.
It will return an object with the instances whose mask is nonzero.
"""
if isinstance(item, int):
selected_polygons = [self.polygons[item]]
elif isinstance(item, slice):
selected_polygons = self.polygons[item]
elif isinstance(item, list):
selected_polygons = [self.polygons[i] for i in item]
elif isinstance(item, torch.Tensor):
# Polygons is a list, so we have to move the indices back to CPU.
if item.dtype == torch.bool:
assert item.dim() == 1, item.shape
item = item.nonzero().squeeze(1).cpu().numpy().tolist()
elif item.dtype in [torch.int32, torch.int64]:
item = item.cpu().numpy().tolist()
else:
raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
selected_polygons = [self.polygons[i] for i in item]
return PolygonMasks(selected_polygons)
def __iter__(self) -> Iterator[List[np.ndarray]]:
"""
Yields:
list[ndarray]: the polygons for one instance.
Each ndarray is a float64 vector representing a polygon.
"""
return iter(self.polygons)
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.polygons))
return s
def __len__(self) -> int:
return len(self.polygons)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each mask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor: A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = boxes.device
# Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
# (several small tensors for representing a single instance mask)
boxes = boxes.to(torch.device("cpu"))
results = [
rasterize_polygons_within_box(poly, box.numpy(), mask_size)
for poly, box in zip(self.polygons, boxes)
]
"""
poly: list[list[float]], the polygons for one instance
box: a tensor of shape (4,)
"""
if len(results) == 0:
return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
return torch.stack(results, dim=0).to(device=device)
def area(self):
"""
Computes area of the mask.
Only works with Polygons, using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Returns:
Tensor: a vector, area for each instance
"""
area = []
for polygons_per_instance in self.polygons:
area_per_instance = 0
for p in polygons_per_instance:
area_per_instance += polygon_area(p[0::2], p[1::2])
area.append(area_per_instance)
return torch.tensor(area)
@staticmethod
def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
"""
Concatenates a list of PolygonMasks into a single PolygonMasks
Arguments:
polymasks_list (list[PolygonMasks])
Returns:
PolygonMasks: the concatenated PolygonMasks
"""
assert isinstance(polymasks_list, (list, tuple))
assert len(polymasks_list) > 0
assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
cat_polymasks = type(polymasks_list[0])(
list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
)
return cat_polymasks
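# --- Illustrative sketch (not part of the original file): a hedged example of
# building PolygonMasks for a single hypothetical triangle instance, relying on
# the numpy/torch imports of this module.
def _example_polygonmasks_usage():
    # One instance made of one triangle polygon [x0, y0, x1, y1, x2, y2].
    triangle = np.array([0.0, 0.0, 4.0, 0.0, 0.0, 3.0])
    polymasks = PolygonMasks([[triangle]])
    areas = polymasks.area()                # shoelace area -> tensor([6.])
    boxes = polymasks.get_bounding_boxes()  # tight box -> [0., 0., 4., 3.]
    return areas, boxes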
class ROIMasks:
"""
Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
full-image bitmask can be obtained by "pasting" the mask on the region defined
by the corresponding ROI box.
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor: (N, M, M) mask tensor that defines the mask within each ROI.
"""
if tensor.dim() != 3:
raise ValueError("ROIMasks must take a mask tensor of 3 dimensions.")
self.tensor = tensor
def to(self, device: torch.device) -> "ROIMasks":
return ROIMasks(self.tensor.to(device))
@property
def device(self) -> device:
return self.tensor.device
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, item) -> "ROIMasks":
"""
Returns:
ROIMasks: Create a new :class:`ROIMasks` by indexing.
The following usages are allowed:
1. `new_masks = masks[2:10]`: return a slice of masks.
2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
t = self.tensor[item]
if t.dim() != 3:
raise ValueError(
f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
)
return ROIMasks(t)
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
@torch.jit.unused
def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
"""
Args: see documentation of :func:`paste_masks_in_image`.
"""
from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape
if torch.jit.is_tracing():
if isinstance(height, torch.Tensor):
paste_func = _paste_masks_tensor_shape
else:
paste_func = paste_masks_in_image
else:
paste_func = retry_if_cuda_oom(paste_masks_in_image)
bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold)
return BitMasks(bitmasks)
<fim_middle>return mask_util.decode(rle).astype(np.bool) | return mask_util.decode(rle).astype(np.bool) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/structures/masks.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/projects/UniRef/uniref/util/box_ops.py
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
Returns an [N, 4] tensor, with the boxes in xyxy format
"""
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float, device=masks.device)
x = torch.arange(0, w, dtype=torch.float, device=masks.device)
y, x = torch.meshgrid(y, x)
x_mask = (masks * x.unsqueeze(0))
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = (masks * y.unsqueeze(0))
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
return torch.stack([x_min, y_min, x_max, y_max], 1)
# UniRef/projects/UniRef/uniref/models/deformable_detr/deformable_transformer.py
def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes):
N_, S_, C_ = memory.shape
base_scale = 4.0
proposals = []
_cur = 0
for lvl, (H_, W_) in enumerate(spatial_shapes):
mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)
proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
proposals.append(proposal)
_cur += (H_ * W_)
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
output_proposals = torch.log(output_proposals / (1 - output_proposals))
output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))
output_memory = memory
output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
output_memory = self.enc_output_norm(self.enc_output(output_memory))
return output_memory, output_proposals
# UniRef/detectron2/export/shared.py
def get_producer_map(ssa):
"""
Return dict from versioned blob to (i, j),
where i is index of producer op, j is the index of output of that op.
"""
producer_map = {}
for i in range(len(ssa)):
outputs = ssa[i][1]
for j, outp in enumerate(outputs):
producer_map[outp] = (i, j)
return producer_map
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
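# --- Illustrative sketch (not in the original file): a quick sanity check of the
# shoelace formula above on a hypothetical unit square, using the numpy import of
# this module.
def _example_polygon_area():
    x = np.array([0.0, 1.0, 1.0, 0.0])  # unit square corners, counter-clockwise
    y = np.array([0.0, 0.0, 1.0, 1.0])
    return polygon_area(x, y)  # expected 1.0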
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(np.bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(np.bool)
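# --- Illustrative sketch (not in the original file): a hedged example of
# rasterizing one hypothetical triangle with the pycocotools-backed helper above,
# relying on the numpy and pycocotools imports of this module.
def _example_polygons_to_bitmask():
    triangle = np.array([1.0, 1.0, 8.0, 1.0, 4.0, 8.0])  # [x0, y0, x1, y1, x2, y2]
    mask = polygons_to_bitmask([triangle], height=10, width=10)
    # Expected: a (10, 10) boolean array with the triangle interior set to True.
    return mask.shape, mask.sum()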
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usages are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However, we observe no difference in accuracy, though BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zero.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list corresponds to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around polygon masks.
"""
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
<fim_suffix>
return Boxes(boxes)
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor:
a BoolTensor which represents whether each mask is empty (False) or not (True).
"""
keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
return torch.from_numpy(np.asarray(keep, dtype=np.bool))
def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
"""
Support indexing over the instances and return a `PolygonMasks` object.
`item` can be:
1. An integer. It will return an object with only one instance.
2. A slice. It will return an object with the selected instances.
3. A list[int]. It will return an object with the selected instances,
corresponding to the indices in the list.
4. A vector mask of type BoolTensor, whose length is num_instances.
It will return an object with the instances whose mask is nonzero.
"""
if isinstance(item, int):
selected_polygons = [self.polygons[item]]
elif isinstance(item, slice):
selected_polygons = self.polygons[item]
elif isinstance(item, list):
selected_polygons = [self.polygons[i] for i in item]
elif isinstance(item, torch.Tensor):
# Polygons is a list, so we have to move the indices back to CPU.
if item.dtype == torch.bool:
assert item.dim() == 1, item.shape
item = item.nonzero().squeeze(1).cpu().numpy().tolist()
elif item.dtype in [torch.int32, torch.int64]:
item = item.cpu().numpy().tolist()
else:
raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
selected_polygons = [self.polygons[i] for i in item]
return PolygonMasks(selected_polygons)
def __iter__(self) -> Iterator[List[np.ndarray]]:
"""
Yields:
list[ndarray]: the polygons for one instance.
Each ndarray is a float64 vector representing a polygon.
"""
return iter(self.polygons)
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.polygons))
return s
def __len__(self) -> int:
return len(self.polygons)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each mask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor: A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = boxes.device
# Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
# (several small tensors for representing a single instance mask)
boxes = boxes.to(torch.device("cpu"))
results = [
rasterize_polygons_within_box(poly, box.numpy(), mask_size)
for poly, box in zip(self.polygons, boxes)
]
"""
poly: list[list[float]], the polygons for one instance
box: a tensor of shape (4,)
"""
if len(results) == 0:
return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
return torch.stack(results, dim=0).to(device=device)
def area(self):
"""
Computes area of the mask.
Only works with Polygons, using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Returns:
Tensor: a vector, area for each instance
"""
area = []
for polygons_per_instance in self.polygons:
area_per_instance = 0
for p in polygons_per_instance:
area_per_instance += polygon_area(p[0::2], p[1::2])
area.append(area_per_instance)
return torch.tensor(area)
@staticmethod
def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
"""
Concatenates a list of PolygonMasks into a single PolygonMasks
Arguments:
polymasks_list (list[PolygonMasks])
Returns:
PolygonMasks: the concatenated PolygonMasks
"""
assert isinstance(polymasks_list, (list, tuple))
assert len(polymasks_list) > 0
assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
cat_polymasks = type(polymasks_list[0])(
list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
)
return cat_polymasks
class ROIMasks:
"""
Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
full-image bitmask can be obtained by "pasting" the mask on the region defined
by the corresponding ROI box.
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor: (N, M, M) mask tensor that defines the mask within each ROI.
"""
if tensor.dim() != 3:
raise ValueError("ROIMasks must take a mask tensor of 3 dimensions.")
self.tensor = tensor
def to(self, device: torch.device) -> "ROIMasks":
return ROIMasks(self.tensor.to(device))
@property
def device(self) -> device:
return self.tensor.device
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, item) -> "ROIMasks":
"""
Returns:
ROIMasks: Create a new :class:`ROIMasks` by indexing.
The following usages are allowed:
1. `new_masks = masks[2:10]`: return a slice of masks.
2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
t = self.tensor[item]
if t.dim() != 3:
raise ValueError(
f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
)
return ROIMasks(t)
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
@torch.jit.unused
def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
"""
Args: see documentation of :func:`paste_masks_in_image`.
"""
from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape
if torch.jit.is_tracing():
if isinstance(height, torch.Tensor):
paste_func = _paste_masks_tensor_shape
else:
paste_func = paste_masks_in_image
else:
paste_func = retry_if_cuda_oom(paste_masks_in_image)
bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold)
return BitMasks(bitmasks)
<fim_middle>boxes[idx, 2:] = maxxy | boxes[idx, 2:] = maxxy | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/layers/losses.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/evaluation/refcocoeval.py
def compute_mask_iou(outputs: torch.Tensor, labels: torch.Tensor, EPS=1e-6):
outputs = outputs.int()
intersection = (outputs & labels).float().sum((1, 2)) # Will be zero if Truth=0 or Prediction=0
union = (outputs | labels).float().sum((1, 2)) # Will be zero if both are 0
iou = (intersection + EPS) / (union + EPS) # EPS is used to avoid division by zero
return iou, intersection, union
# UniRef/detectron2/export/shared.py
def create_const_fill_op(
name: str,
blob: Union[np.ndarray, workspace.Int8Tensor],
device_option: Optional[caffe2_pb2.DeviceOption] = None,
) -> caffe2_pb2.OperatorDef:
"""
Given a blob object, return the Caffe2 operator that creates this blob
as a constant. Currently supports NumPy tensors and Caffe2 Int8Tensor.
"""
tensor_type = type(blob)
assert tensor_type in [
np.ndarray,
workspace.Int8Tensor,
], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format(
name, type(blob)
)
if tensor_type == np.ndarray:
return _create_const_fill_op_from_numpy(name, blob, device_option)
elif tensor_type == workspace.Int8Tensor:
assert device_option is None
return _create_const_fill_op_from_c2_int8_tensor(name, blob)
# UniRef/projects/UniRef/uniref/util/box_ops.py
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / (area+1e-7)
"""
import math
import torch
def diou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Distance Intersection over Union Loss (Zhaohui Zheng et al.)
https://arxiv.org/abs/1911.08287
Args:
boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
eps (float): small number to prevent division by zero
"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# TODO: use torch._assert_async() when pytorch 1.8 support is dropped
assert (x2 >= x1).all(), "bad box: x1 larger than x2"
assert (y2 >= y1).all(), "bad box: y1 larger than y2"
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsct = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
iou = intsct / union
# smallest enclosing box
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
<fim_suffix>
y_g = (y1g + y2g) / 2
distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
# Eqn. (7)
loss = 1 - iou + (distance / diag_len)
if reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
return loss
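# --- Illustrative sketch (not part of the original file): a hedged numeric
# example of diou_loss on two hypothetical box pairs; identical boxes give a loss
# near 0, while disjoint boxes add a center-distance penalty on top of 1 - IoU.
def _example_diou_loss():
    pred = torch.tensor([[0.0, 0.0, 10.0, 10.0], [0.0, 0.0, 10.0, 10.0]])
    gt = torch.tensor([[0.0, 0.0, 10.0, 10.0], [20.0, 20.0, 30.0, 30.0]])
    per_box = diou_loss(pred, gt, reduction="none")  # roughly [0.0, 1.44]
    mean = diou_loss(pred, gt, reduction="mean")
    return per_box, mean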
def ciou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Complete Intersection over Union Loss (Zhaohui Zheng et al.)
https://arxiv.org/abs/1911.08287
Args:
boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
eps (float): small number to prevent division by zero
"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# TODO: use torch._assert_async() when pytorch 1.8 support is dropped
assert (x2 >= x1).all(), "bad box: x1 larger than x2"
assert (y2 >= y1).all(), "bad box: y1 larger than y2"
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsct = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
iou = intsct / union
# smallest enclosing box
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
x_g = (x1g + x2g) / 2
y_g = (y1g + y2g) / 2
distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
# width and height of boxes
w_pred = x2 - x1
h_pred = y2 - y1
w_gt = x2g - x1g
h_gt = y2g - y1g
v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
with torch.no_grad():
alpha = v / (1 - iou + v + eps)
# Eqn. (10)
loss = 1 - iou + (distance / diag_len) + alpha * v
if reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
return loss
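# --- Illustrative sketch (not part of the original file): CIoU extends DIoU with
# an aspect-ratio penalty (alpha * v), so the two losses coincide when the boxes
# share the same width/height ratio; the boxes below are hypothetical.
def _example_ciou_vs_diou():
    pred = torch.tensor([[0.0, 0.0, 4.0, 2.0]])             # aspect ratio 2:1
    gt_same_ratio = torch.tensor([[1.0, 1.0, 9.0, 5.0]])    # 8x4, also 2:1
    gt_other_ratio = torch.tensor([[1.0, 1.0, 5.0, 9.0]])   # 4x8, ratio 1:2
    same = ciou_loss(pred, gt_same_ratio) - diou_loss(pred, gt_same_ratio)    # ~0
    diff = ciou_loss(pred, gt_other_ratio) - diou_loss(pred, gt_other_ratio)  # > 0
    return same, diff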
<fim_middle>x_g = (x1g + x2g) / 2 | x_g = (x1g + x2g) / 2 | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain conditions, still keep them
in tracking and merge them with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging the current instances with those instances from the
previous frame that are kept in tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/utils/testing.py
def convert_scripted_instances(instances):
"""
Convert a scripted Instances object to a regular :class:`Instances` object
"""
assert hasattr(
instances, "image_size"
), f"Expect an Instances object, but got {type(instances)}!"
ret = Instances(instances.image_size)
for name in instances._field_names:
val = getattr(instances, "_" + name, None)
if val is not None:
ret.set(name, val)
return ret
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
import numpy as np
import torch
from detectron2.structures import Boxes, Instances
from .base_tracker import BaseTracker
from scipy.optimize import linear_sum_assignment
from ..config.config import CfgNode as CfgNode_
from typing import Dict
from detectron2.config import configurable
class BaseHungarianTracker(BaseTracker):
"""
A base class for all Hungarian trackers
"""
@configurable
def __init__(
self,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
    once this number is exceeded, the id is considered lost forever
min_box_rel_dim: a relative size threshold; a bbox smaller than this
    fraction of the frame dimension is removed from tracking
min_instance_period: an instance is shown only after it has been tracked
    for this number of periods since it first appears in the video
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
@classmethod
def from_config(cls, cfg: CfgNode_) -> Dict:
raise NotImplementedError("Calling HungarianTracker::from_config")
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
raise NotImplementedError("Calling HungarianTracker::build_matrix")
def update(self, instances: Instances) -> Instances:
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
self._untracked_prev_idx = set(range(len(self._prev_instances)))
cost_matrix = self.build_cost_matrix(instances, self._prev_instances)
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx)
instances = self._process_unmatched_idx(instances, matched_idx)
instances = self._process_unmatched_prev_idx(instances, matched_prev_idx)
self._prev_instances = copy.deepcopy(instances)
return instances
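# --- Illustrative note (not part of the original file): the matching step in
# update() relies on scipy's linear_sum_assignment (the Hungarian algorithm).
# A hedged, standalone sketch of that step on a hypothetical 2x2 cost matrix:
#   cost = np.array([[0.1, 0.9],
#                    [0.8, 0.2]])
#   rows, cols = linear_sum_assignment(cost)  # rows = [0, 1], cols = [0, 1]
# i.e. current instance 0 is matched to previous instance 0 and current
# instance 1 to previous instance 1, minimizing the total assignment cost.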
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _process_matched_idx(
self,
instances: Instances,
matched_idx: np.ndarray,
matched_prev_idx: np.ndarray
) -> Instances:
assert matched_idx.size == matched_prev_idx.size
for i in range(matched_idx.size):
instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]]
instances.ID_period[matched_idx[i]] = \
self._prev_instances.ID_period[matched_prev_idx[i]] + 1
instances.lost_frame_count[matched_idx[i]] = 0
return instances
def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances:
untracked_idx = set(range(len(instances))).difference(set(matched_idx))
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
<fim_suffix>
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>untracked_instances.ID.append(self._prev_instances.ID[idx]) | untracked_instances.ID.append(self._prev_instances.ID[idx]) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
# UniRef/detectron2/tracking/base_tracker.py
def __init__(self, **kwargs):
self._prev_instances = None # (D2)instances for previous frame
self._matched_idx = set() # indices in prev_instances found matching
self._matched_ID = set() # identities in prev_instances found matching
self._untracked_prev_idx = set() # indices in prev_instances not found matching
self._id_count = 0 # used to assign new id
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
import numpy as np
import torch
from detectron2.structures import Boxes, Instances
from .base_tracker import BaseTracker
from scipy.optimize import linear_sum_assignment
from ..config.config import CfgNode as CfgNode_
from typing import Dict
from detectron2.config import configurable
class BaseHungarianTracker(BaseTracker):
"""
A base class for all Hungarian trackers
"""
@configurable
def __init__(
self,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
    once this number is exceeded, the id is considered lost forever
min_box_rel_dim: a relative size threshold; a bbox smaller than this
    fraction of the frame dimension is removed from tracking
min_instance_period: an instance is shown only after it has been tracked
    for this number of periods since it first appears in the video
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
@classmethod
def from_config(cls, cfg: CfgNode_) -> Dict:
raise NotImplementedError("Calling HungarianTracker::from_config")
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
raise NotImplementedError("Calling HungarianTracker::build_matrix")
def update(self, instances: Instances) -> Instances:
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
self._untracked_prev_idx = set(range(len(self._prev_instances)))
cost_matrix = self.build_cost_matrix(instances, self._prev_instances)
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx)
instances = self._process_unmatched_idx(instances, matched_idx)
instances = self._process_unmatched_prev_idx(instances, matched_prev_idx)
self._prev_instances = copy.deepcopy(instances)
return instances
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
<fim_suffix>
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _process_matched_idx(
self,
instances: Instances,
matched_idx: np.ndarray,
matched_prev_idx: np.ndarray
) -> Instances:
assert matched_idx.size == matched_prev_idx.size
for i in range(matched_idx.size):
instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]]
instances.ID_period[matched_idx[i]] = \
self._prev_instances.ID_period[matched_prev_idx[i]] + 1
instances.lost_frame_count[matched_idx[i]] = 0
return instances
def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances:
untracked_idx = set(range(len(instances))).difference(set(matched_idx))
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances)) | if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances)) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/external/lvos-evaluation/lvos/metrics.py
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
Compute mean, recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F
# UniRef/external/lvos-evaluation/lvos/metrics.py
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
# UniRef/external/lvos-evaluation/lvos/metrics.py
def _seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <dmartin@eecs.berkeley.edu>
January 2003
"""
seg = seg.astype(np.bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
assert not (
width > w | height > h | abs(ar1 - ar2) > 0.01
), "Can" "t convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
for y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1
return bmap
"""
import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008):
assert annotation.shape == segmentation.shape
if void_pixels is not None:
assert annotation.shape == void_pixels.shape
if annotation.ndim == 3:
n_frames = annotation.shape[0]
f_res = np.zeros(n_frames)
for frame_id in range(n_frames):
void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ]
f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th)
elif annotation.ndim == 2:
f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th)
else:
raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions')
return f_res
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
Compute mean, recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
from skimage.morphology import disk
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
<fim_suffix>
return F
def _seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <dmartin@eecs.berkeley.edu>
January 2003
"""
seg = seg.astype(np.bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
assert not (
width > w | height > h | abs(ar1 - ar2) > 0.01
), "Can" "t convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
for y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1
return bmap
if __name__ == '__main__':
from davis2017.davis import DAVIS
from davis2017.results import Results
dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics')
results = Results(root_dir='examples/osvos')
# Test timing F measure
for seq in dataset.get_sequences():
all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True)
all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
all_res_masks = results.read_masks(seq, all_masks_id)
f_metrics_res = np.zeros(all_gt_masks.shape[:2])
for ii in range(all_gt_masks.shape[0]):
f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...])
# Run using to profile code: python -m cProfile -o f_measure.prof metrics.py
# snakeviz f_measure.prof
<fim_middle>if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall) | if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
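The completed branch above folds boundary precision and recall into an F-measure, and db_eval_iou earlier in the same file computes the region Jaccard index. Below is a toy sketch of both quantities on made-up 4x4 masks (illustrative numbers only; the real f_measure works on dilated boundary maps rather than raw region masks):

import numpy as np

# 4x4 toy masks: the prediction overshoots the ground truth by one column.
gt = np.zeros((4, 4), dtype=bool)
gt[1:3, 1:3] = True
pred = np.zeros((4, 4), dtype=bool)
pred[1:3, 1:4] = True

inters = np.logical_and(gt, pred).sum()
union = np.logical_or(gt, pred).sum()
jaccard = inters / union              # 4 / 6, what db_eval_iou returns for this pair

# Region-level analogue of the precision/recall combination completed above;
# the real f_measure computes these on dilated boundary maps instead.
precision = inters / pred.sum()       # 4 / 6
recall = inters / gt.sum()            # 4 / 4
F = 0 if precision + recall == 0 else 2 * precision * recall / (precision + recall)
print(jaccard, F)                     # 0.666..., 0.8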
<filename>UniRef/detectron2/config/config.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/data/transforms/augmentation.py
def _get_aug_input_args(aug, aug_input) -> List[Any]:
"""
Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``.
"""
if aug.input_args is None:
# Decide what attributes are needed automatically
prms = list(inspect.signature(aug.get_transform).parameters.items())
# The default behavior is: if there is one parameter, then its "image"
# (work automatically for majority of use cases, and also avoid BC breaking),
# Otherwise, use the argument names.
if len(prms) == 1:
names = ("image",)
else:
names = []
for name, prm in prms:
if prm.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
raise TypeError(
f""" \
The default implementation of `{type(aug)}.__call__` does not allow \
`{type(aug)}.get_transform` to use variable-length arguments (*args, **kwargs)! \
If arguments are unknown, reimplement `__call__` instead. \
"""
)
names.append(name)
aug.input_args = tuple(names)
args = []
for f in aug.input_args:
try:
args.append(getattr(aug_input, f))
except AttributeError as e:
raise AttributeError(
f"{type(aug)}.get_transform needs input attribute '{f}', "
f"but it is not an attribute of {type(aug_input)}!"
) from e
return args
# UniRef/detectron2/config/instantiate.py
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
return cfg # return as-is if don't know what to do
# UniRef/detectron2/modeling/roi_heads/cascade_rcnn.py
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg, input_shape)
ret.pop("proposal_matcher")
return ret
"""
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
"""
Load content from the given config file and merge it into self.
Args:
cfg_filename: config filename
allow_unsafe: allow unsafe yaml syntax
"""
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
@configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
else:
if from_config is None:
return configurable # @configurable() is made equivalent to @configurable
assert inspect.isfunction(
from_config
), "from_config argument of configurable must be a function!"
def wrapper(orig_func):
@functools.wraps(orig_func)
def wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs)
wrapped.from_config = from_config
return wrapped
return wrapper
def _get_args_from_config(from_config_func, *args, **kwargs):
"""
Use `from_config` to obtain explicit arguments.
Returns:
dict: arguments to be used for cls.__init__
"""
signature = inspect.signature(from_config_func)
if list(signature.parameters.keys())[0] != "cfg":
if inspect.isfunction(from_config_func):
name = from_config_func.__name__
else:
name = f"{from_config_func.__self__}.from_config"
raise TypeError(f"{name} must take 'cfg' as the first argument!")
support_var_arg = any(
param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
for param in signature.parameters.values()
)
<fim_suffix>
return ret
def _called_with_cfg(*args, **kwargs):
"""
Returns:
bool: whether the arguments contain CfgNode and should be considered
forwarded to from_config.
"""
from omegaconf import DictConfig
if len(args) and isinstance(args[0], (_CfgNode, DictConfig)):
return True
if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)):
return True
# `from_config`'s first argument is forced to be "cfg".
# So the above check covers all cases.
return False
<fim_middle>if support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs) | if support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
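The completed branch above lets from_config either swallow every keyword argument or receive only the ones it declares. Below is a small self-contained sketch of the cfg-versus-regular-arguments dispatch that configurable provides; the decorator and class names are made up and a plain dict stands in for CfgNode, so this is an illustration rather than detectron2's actual code:

import functools

def mini_configurable(init_func):      # toy stand-in, not detectron2's decorator
    @functools.wraps(init_func)
    def wrapped(self, *args, **kwargs):
        # Treat a single positional dict as the "cfg" and route it through from_config.
        if len(args) == 1 and isinstance(args[0], dict) and not kwargs:
            init_func(self, **type(self).from_config(args[0]))
        else:
            init_func(self, *args, **kwargs)
    return wrapped

class A:
    @mini_configurable
    def __init__(self, a, b=2):
        self.a, self.b = a, b

    @classmethod
    def from_config(cls, cfg):          # translates the cfg mapping into __init__ kwargs
        return {"a": cfg["A"], "b": cfg.get("B", 2)}

print(A(a=1).b)                         # 2, regular construction
print(A({"A": 10, "B": 3}).b)           # 3, construction from a cfg-like dict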
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/hungarian_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/tracking/base_tracker.py
def __init__(self, **kwargs):
self._prev_instances = None # (D2)instances for previous frame
self._matched_idx = set() # indices in prev_instances found matching
self._matched_ID = set() # identities in prev_instances found matching
self._untracked_prev_idx = set() # indices in prev_instances not found matching
self._id_count = 0 # used to assign new id
# UniRef/detectron2/tracking/hungarian_tracker.py
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
beyond this number, an id is considered lost forever
min_box_rel_dim: a percentage; a bbox smaller than this relative dimension
is removed from tracking
min_instance_period: an instance will only be shown after this number of
periods since it first appears in the video
track_iou_threshold: iou threshold, below this number a bbox pair is removed
from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of IoU for all pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
<fim_suffix>
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
"""
Before each update call, reset fields first
"""
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain condition, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging current instances and instances from previous
frame decided to keep tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances)) | if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances)) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/config/lazy.py
def __call__(self, **kwargs):
if is_dataclass(self._target):
# omegaconf object cannot hold dataclass type
# https://github.com/omry/omegaconf/issues/784
target = _convert_target_to_string(self._target)
else:
target = self._target
kwargs["_target_"] = target
return DictConfig(content=kwargs, flags={"allow_objects": True})
# UniRef/detectron2/config/lazy.py
def __init__(self, target):
if not (callable(target) or isinstance(target, (str, abc.Mapping))):
raise TypeError(
f"target of LazyCall must be a callable or defines a callable! Got {target}"
)
self._target = target
# UniRef/detectron2/engine/launch.py
def _distributed_worker(
local_rank,
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout=DEFAULT_TIMEOUT,
):
assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
global_rank = machine_rank * num_gpus_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
timeout=timeout,
)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error("Process group URL: {}".format(dist_url))
raise e
# Setup the local process group (which contains ranks within the same machine)
assert comm._LOCAL_PROCESS_GROUP is None
num_machines = world_size // num_gpus_per_machine
for i in range(num_machines):
ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
pg = dist.new_group(ranks_on_i)
if i == machine_rank:
comm._LOCAL_PROCESS_GROUP = pg
assert num_gpus_per_machine <= torch.cuda.device_count()
torch.cuda.set_device(local_rank)
# synchronize is needed here to prevent a possible timeout after calling init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
main_func(*args)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
<fim_suffix>
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
return cfg # return as-is if don't know what to do
<fim_middle>if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls) | if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
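The completed branch above resolves _target_ whether it arrives as a dotted string or as a callable. Below is a stripped-down sketch of the recursive _target_ pattern itself, with a made-up helper name and without omegaconf or dotted-string lookup:

from collections import abc

def mini_instantiate(cfg):               # toy version, not the detectron2 function
    if isinstance(cfg, list):
        return [mini_instantiate(x) for x in cfg]
    if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
        kwargs = {k: mini_instantiate(v) for k, v in cfg.items()}
        target = kwargs.pop("_target_")   # already a callable here
        return target(**kwargs)
    return cfg                            # plain values pass through unchanged

cfg = {"_target_": dict, "name": "toy", "inner": {"_target_": dict, "a": 1, "b": 2}}
print(mini_instantiate(cfg))              # {'name': 'toy', 'inner': {'a': 1, 'b': 2}}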
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/hungarian_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/tracking/base_tracker.py
def __init__(self, **kwargs):
self._prev_instances = None # (D2)instances for previous frame
self._matched_idx = set() # indices in prev_instances found matching
self._matched_ID = set() # idendities in prev_instances found matching
self._untracked_prev_idx = set() # indices in prev_instances not found matching
self._id_count = 0 # used to assign new id
# UniRef/detectron2/tracking/hungarian_tracker.py
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
beyond this number, an id is considered lost forever
min_box_rel_dim: a percentage; a bbox smaller than this relative dimension
is removed from tracking
min_instance_period: an instance will only be shown after this number of
periods since it first appears in the video
track_iou_threshold: iou threshold, below this number a bbox pair is removed
from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# sort IoU in descending order
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of IoU for all pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
<fim_suffix>
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
"""
Before each update call, reset fields first
"""
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain condition, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging current instances and instances from previous
frame decided to keep tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>if not instances.has("ID"):
instances.set("ID", [None] * len(instances)) | if not instances.has("ID"):
instances.set("ID", [None] * len(instances)) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/structures/instances.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/export/c10.py
def __getattr__(self, name):
if name not in self.batch_extra_fields:
raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
return self.batch_extra_fields[name]
# UniRef/projects/UniRef/uniref/util/misc.py
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
# UniRef/detectron2/engine/train_loop.py
def load_state_dict(self, state_dict):
logger = logging.getLogger(__name__)
self.iter = state_dict["iteration"]
for key, value in state_dict.get("hooks", {}).items():
for h in self._hooks:
try:
name = type(h).__qualname__
except AttributeError:
continue
if name == key:
h.load_state_dict(value)
break
else:
logger.warning(f"Cannot find the hook '{key}', its state_dict is ignored.")
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
from typing import Any, Dict, List, Tuple, Union
import torch
class Instances:
"""
This class represents a list of instances in an image.
It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields".
All fields must have the same ``__len__`` which is the number of instances.
All other (non-field) attributes of this class are considered private:
they must start with '_' and are not modifiable by a user.
Some basic usage:
1. Set/get/check a field:
.. code-block:: python
instances.gt_boxes = Boxes(...)
print(instances.pred_masks) # a tensor of shape (N, H, W)
print('gt_masks' in instances)
2. ``len(instances)`` returns the number of instances
3. Indexing: ``instances[indices]`` will apply the indexing on all the fields
and returns a new :class:`Instances`.
Typically, ``indices`` is an integer vector of indices,
or a binary mask of length ``num_instances``
.. code-block:: python
category_3_detections = instances[instances.pred_classes == 3]
confident_detections = instances[instances.scores > 0.9]
"""
def __init__(self, image_size: Tuple[int, int], **kwargs: Any):
"""
Args:
image_size (height, width): the spatial size of the image.
kwargs: fields to add to this `Instances`.
"""
self._image_size = image_size
self._fields: Dict[str, Any] = {}
for k, v in kwargs.items():
self.set(k, v)
@property
def image_size(self) -> Tuple[int, int]:
"""
Returns:
tuple: height, width
"""
return self._image_size
def __setattr__(self, name: str, val: Any) -> None:
if name.startswith("_"):
super().__setattr__(name, val)
else:
self.set(name, val)
def __getattr__(self, name: str) -> Any:
<fim_suffix>
return self._fields[name]
def set(self, name: str, value: Any) -> None:
"""
Set the field named `name` to `value`.
The length of `value` must be the number of instances,
and must agree with other existing fields in this object.
"""
data_len = len(value)
if len(self._fields):
assert (
len(self) == data_len
), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self))
self._fields[name] = value
def has(self, name: str) -> bool:
"""
Returns:
bool: whether the field called `name` exists.
"""
return name in self._fields
def remove(self, name: str) -> None:
"""
Remove the field called `name`.
"""
del self._fields[name]
def get(self, name: str) -> Any:
"""
Returns the field called `name`.
"""
return self._fields[name]
def get_fields(self) -> Dict[str, Any]:
"""
Returns:
dict: a dict which maps names (str) to data of the fields
Modifying the returned dict will modify this instance.
"""
return self._fields
# Tensor-like methods
def to(self, *args: Any, **kwargs: Any) -> "Instances":
"""
Returns:
Instances: all fields are called with a `to(device)`, if the field has this method.
"""
ret = Instances(self._image_size)
for k, v in self._fields.items():
if hasattr(v, "to"):
v = v.to(*args, **kwargs)
ret.set(k, v)
return ret
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances":
"""
Args:
item: an index-like object and will be used to index all the fields.
Returns:
If `item` is a string, return the data in the corresponding field.
Otherwise, returns an `Instances` where all fields are indexed by `item`.
"""
if type(item) == int:
if item >= len(self) or item < -len(self):
raise IndexError("Instances index out of range!")
else:
item = slice(item, None, len(self))
ret = Instances(self._image_size)
for k, v in self._fields.items():
ret.set(k, v[item])
return ret
def __len__(self) -> int:
for v in self._fields.values():
# use __len__ because len() has to be int and is not friendly to tracing
return v.__len__()
raise NotImplementedError("Empty Instances does not support __len__!")
def __iter__(self):
raise NotImplementedError("`Instances` object is not iterable!")
@staticmethod
def cat(instance_lists: List["Instances"]) -> "Instances":
"""
Args:
instance_lists (list[Instances])
Returns:
Instances
"""
assert all(isinstance(i, Instances) for i in instance_lists)
assert len(instance_lists) > 0
if len(instance_lists) == 1:
return instance_lists[0]
image_size = instance_lists[0].image_size
if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing
for i in instance_lists[1:]:
assert i.image_size == image_size
ret = Instances(image_size)
for k in instance_lists[0]._fields.keys():
values = [i.get(k) for i in instance_lists]
v0 = values[0]
if isinstance(v0, torch.Tensor):
values = torch.cat(values, dim=0)
elif isinstance(v0, list):
values = list(itertools.chain(*values))
elif hasattr(type(v0), "cat"):
values = type(v0).cat(values)
else:
raise ValueError("Unsupported type {} for concatenation".format(type(v0)))
ret.set(k, values)
return ret
def __str__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self))
s += "image_height={}, ".format(self._image_size[0])
s += "image_width={}, ".format(self._image_size[1])
s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items())))
return s
__repr__ = __str__
<fim_middle>if name == "_fields" or name not in self._fields:
raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) | if name == "_fields" or name not in self._fields:
raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/hungarian_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/tracking/base_tracker.py
def __init__(self, **kwargs):
self._prev_instances = None # (D2)instances for previous frame
self._matched_idx = set() # indices in prev_instances found matching
self._matched_ID = set() # identities in prev_instances found matching
self._untracked_prev_idx = set() # indices in prev_instances not found matching
self._id_count = 0 # used to assign new id
# UniRef/detectron2/tracking/hungarian_tracker.py
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost forever
min_box_rel_dim: a fraction of the frame size; a bbox whose relative
width or height is smaller than this is removed from tracking
min_instance_period: an instance is only shown after it has been tracked
for this number of frames since it first appears in the video
track_iou_threshold: IoU threshold; a bbox pair with IoU below this value
is removed from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# create all (current, previous) bbox pairs together with their IoU
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of dicts, one per (current, previous) pair, each storing the indices, the previous ID, the IoU, and the previous ID period
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
<fim_suffix>
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
"""
Before each update call, reset fields first
"""
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain conditions, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging the current instances with the previous-frame
instances that were kept in tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances)) | if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances)) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/hungarian_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/tracking/base_tracker.py
def __init__(self, **kwargs):
self._prev_instances = None # (D2)instances for previous frame
self._matched_idx = set() # indices in prev_instances found matching
self._matched_ID = set() # idendities in prev_instances found matching
self._untracked_prev_idx = set() # indices in prev_instances not found matching
self._id_count = 0 # used to assign new id
# UniRef/detectron2/tracking/hungarian_tracker.py
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost forever
min_box_rel_dim: a fraction of the frame size; a bbox whose relative
width or height is smaller than this is removed from tracking
min_instance_period: an instance is only shown after it has been tracked
for this number of frames since it first appears in the video
track_iou_threshold: IoU threshold; a bbox pair with IoU below this value
is removed from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
# create all (current, previous) bbox pairs together with their IoU
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of dicts, one per (current, previous) pair, each storing the indices, the previous ID, the IoU, and the previous ID period
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
<fim_suffix>
return instances
def _reset_fields(self):
"""
Before each update call, reset fields first
"""
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain conditions, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging the current instances with the previous-frame
instances that were kept in tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances) | if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
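The target here is the first-frame branch of `_initialize_extra_fields`: with no previous instances, every detection receives a fresh sequential ID, a period of 1, and a zero lost-frame counter. A small sketch of that bootstrap step under the same assumptions (plain lists instead of an Instances object; names are illustrative):

def bootstrap_first_frame(num_instances: int, id_count: int):
    """Assign fresh IDs and reset the per-instance counters for frame 0."""
    ids = list(range(id_count, id_count + num_instances))
    id_period = [1] * num_instances
    lost_frame_count = [0] * num_instances
    return ids, id_period, lost_frame_count, id_count + num_instances

ids, periods, lost, next_id = bootstrap_first_frame(3, id_count=0)
print(ids, next_id)  # [0, 1, 2] 3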
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain conditions, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging the current instances with the previous-frame
instances that were kept in tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
# UniRef/detectron2/utils/testing.py
def convert_scripted_instances(instances):
"""
Convert a scripted Instances object to a regular :class:`Instances` object
"""
assert hasattr(
instances, "image_size"
), f"Expect an Instances object, but got {type(instances)}!"
ret = Instances(instances.image_size)
for name in instances._field_names:
val = getattr(instances, "_" + name, None)
if val is not None:
ret.set(name, val)
return ret
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
import numpy as np
import torch
from detectron2.structures import Boxes, Instances
from .base_tracker import BaseTracker
from scipy.optimize import linear_sum_assignment
from ..config.config import CfgNode as CfgNode_
from typing import Dict
from detectron2.config import configurable
class BaseHungarianTracker(BaseTracker):
"""
A base class for all Hungarian trackers
"""
@configurable
def __init__(
self,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can lose tracking;
once this number is exceeded, the id is considered lost forever
min_box_rel_dim: a fraction of the frame size; a bbox whose relative
width or height is smaller than this is removed from tracking
min_instance_period: an instance is only shown after it has been tracked
for this number of frames since it first appears in the video
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
@classmethod
def from_config(cls, cfg: CfgNode_) -> Dict:
raise NotImplementedError("Calling HungarianTracker::from_config")
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
raise NotImplementedError("Calling HungarianTracker::build_matrix")
def update(self, instances: Instances) -> Instances:
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
self._untracked_prev_idx = set(range(len(self._prev_instances)))
cost_matrix = self.build_cost_matrix(instances, self._prev_instances)
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx)
instances = self._process_unmatched_idx(instances, matched_idx)
instances = self._process_unmatched_prev_idx(instances, matched_prev_idx)
self._prev_instances = copy.deepcopy(instances)
return instances
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _process_matched_idx(
self,
instances: Instances,
matched_idx: np.ndarray,
matched_prev_idx: np.ndarray
) -> Instances:
assert matched_idx.size == matched_prev_idx.size
for i in range(matched_idx.size):
instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]]
instances.ID_period[matched_idx[i]] = \
self._prev_instances.ID_period[matched_prev_idx[i]] + 1
instances.lost_frame_count[matched_idx[i]] = 0
return instances
def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances:
untracked_idx = set(range(len(instances))).difference(set(matched_idx))
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
<fim_suffix>
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) | if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
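This row comes from the Hungarian tracker, whose `update` method builds a cost matrix and solves it with `scipy.optimize.linear_sum_assignment`. A self-contained example of that matching step on made-up IoU values (the real cost matrix is defined by subclasses; `1 - IoU` is used here purely for illustration):

import numpy as np
from scipy.optimize import linear_sum_assignment

# Made-up IoU between 3 current boxes (rows) and 2 previous boxes (columns).
iou = np.array([
    [0.8, 0.1],
    [0.0, 0.6],
    [0.2, 0.3],
])
cost = 1.0 - iou  # lower cost = better match
cur_idx, prev_idx = linear_sum_assignment(cost)
print(list(zip(cur_idx.tolist(), prev_idx.tolist())))  # [(0, 0), (1, 1)]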
<filename>UniRef/detectron2/structures/boxes.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/structures/rotated_boxes.py
def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> None:
"""
Given two lists of rotated boxes of size N and M,
compute the IoU (intersection over union)
between **all** N x M pairs of boxes.
The box order must be (x_center, y_center, width, height, angle).
Args:
boxes1, boxes2 (RotatedBoxes):
two `RotatedBoxes`. Contains N & M rotated boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor)
# UniRef/detectron2/evaluation/pascal_voc_evaluation.py
def parse_rec(filename):
"""Parse a PASCAL VOC xml file."""
with PathManager.open(filename) as f:
tree = ET.parse(f)
objects = []
for obj in tree.findall("object"):
obj_struct = {}
obj_struct["name"] = obj.find("name").text
obj_struct["pose"] = obj.find("pose").text
obj_struct["truncated"] = int(obj.find("truncated").text)
obj_struct["difficult"] = int(obj.find("difficult").text)
bbox = obj.find("bndbox")
obj_struct["bbox"] = [
int(bbox.find("xmin").text),
int(bbox.find("ymin").text),
int(bbox.find("xmax").text),
int(bbox.find("ymax").text),
]
objects.append(obj_struct)
return objects
# UniRef/detectron2/structures/masks.py
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zeros.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import numpy as np
from enum import IntEnum, unique
from typing import List, Tuple, Union
import torch
from torch import device
_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
@unique
class BoxMode(IntEnum):
"""
Enum of different ways to represent a box.
"""
XYXY_ABS = 0
"""
(x0, y0, x1, y1) in absolute floating points coordinates.
The coordinates in range [0, width or height].
"""
XYWH_ABS = 1
"""
(x0, y0, w, h) in absolute floating points coordinates.
"""
XYXY_REL = 2
"""
Not yet supported!
(x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
"""
XYWH_REL = 3
"""
Not yet supported!
(x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
"""
XYWHA_ABS = 4
"""
(xc, yc, w, h, a) in absolute floating points coordinates.
(xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
"""
@staticmethod
def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType:
"""
Args:
box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
from_mode, to_mode (BoxMode)
Returns:
The converted box of the same type.
"""
if from_mode == to_mode:
return box
original_type = type(box)
is_numpy = isinstance(box, np.ndarray)
single_box = isinstance(box, (list, tuple))
if single_box:
assert len(box) == 4 or len(box) == 5, (
"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
" where k == 4 or 5"
)
arr = torch.tensor(box)[None, :]
else:
# avoid modifying the input box
if is_numpy:
arr = torch.from_numpy(np.asarray(box)).clone()
else:
arr = box.clone()
assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [
BoxMode.XYXY_REL,
BoxMode.XYWH_REL,
], "Relative mode not yet supported!"
if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
assert (
arr.shape[-1] == 5
), "The last dimension of input shape must be 5 for XYWHA format"
original_dtype = arr.dtype
arr = arr.double()
w = arr[:, 2]
h = arr[:, 3]
a = arr[:, 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
new_w = c * w + s * h
new_h = c * h + s * w
# convert center to top-left corner
arr[:, 0] -= new_w / 2.0
arr[:, 1] -= new_h / 2.0
# bottom-right corner
arr[:, 2] = arr[:, 0] + new_w
arr[:, 3] = arr[:, 1] + new_h
arr = arr[:, :4].to(dtype=original_dtype)
elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
original_dtype = arr.dtype
arr = arr.double()
arr[:, 0] += arr[:, 2] / 2.0
arr[:, 1] += arr[:, 3] / 2.0
angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
else:
if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
arr[:, 2] += arr[:, 0]
arr[:, 3] += arr[:, 1]
elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
arr[:, 2] -= arr[:, 0]
arr[:, 3] -= arr[:, 1]
else:
raise NotImplementedError(
"Conversion from BoxMode {} to {} is not supported yet".format(
from_mode, to_mode
)
)
if single_box:
return original_type(arr.flatten().tolist())
if is_numpy:
return arr.numpy()
else:
return arr
class Boxes:
"""
This structure stores a list of boxes as a Nx4 torch.Tensor.
It supports some common methods about boxes
(`area`, `clip`, `nonempty`, etc),
and also behaves like a Tensor
(support indexing, `to(device)`, `.device`, and iteration over all boxes)
Attributes:
tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
# the inputs (and consequently confuses jit)
tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()
self.tensor = tensor
def clone(self) -> "Boxes":
"""
Clone the Boxes.
Returns:
Boxes
"""
return Boxes(self.tensor.clone())
def to(self, device: torch.device):
# Boxes are assumed float32 and do not support to(dtype)
return Boxes(self.tensor.to(device=device))
def area(self) -> torch.Tensor:
"""
Computes the area of all the boxes.
Returns:
torch.Tensor: a vector with areas of each box.
"""
box = self.tensor
area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
return area
def clip(self, box_size: Tuple[int, int]) -> None:
"""
Clip (in place) the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
Args:
box_size (height, width): The clipping box's size.
"""
assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!"
h, w = box_size
x1 = self.tensor[:, 0].clamp(min=0, max=w)
y1 = self.tensor[:, 1].clamp(min=0, max=h)
x2 = self.tensor[:, 2].clamp(min=0, max=w)
y2 = self.tensor[:, 3].clamp(min=0, max=h)
self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)
def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
"""
Find boxes that are non-empty.
A box is considered empty if either of its sides is no larger than threshold.
Returns:
Tensor:
a binary vector which represents whether each box is empty
(False) or non-empty (True).
"""
box = self.tensor
widths = box[:, 2] - box[:, 0]
heights = box[:, 3] - box[:, 1]
keep = (widths > threshold) & (heights > threshold)
return keep
def __getitem__(self, item) -> "Boxes":
"""
Args:
item: int, slice, or a BoolTensor
Returns:
Boxes: Create a new :class:`Boxes` by indexing.
The following usages are allowed:
1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned Boxes might share storage with this Boxes,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return Boxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item)
return Boxes(b)
def __len__(self) -> int:
return self.tensor.shape[0]
def __repr__(self) -> str:
return "Boxes(" + str(self.tensor) + ")"
def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
"""
Args:
box_size (height, width): Size of the reference box.
boundary_threshold (int): Boxes that extend beyond the reference box
boundary by more than boundary_threshold are considered "outside".
Returns:
a binary vector, indicating whether each box is inside the reference box.
"""
height, width = box_size
inds_inside = (
(self.tensor[..., 0] >= -boundary_threshold)
& (self.tensor[..., 1] >= -boundary_threshold)
& (self.tensor[..., 2] < width + boundary_threshold)
& (self.tensor[..., 3] < height + boundary_threshold)
)
return inds_inside
def get_centers(self) -> torch.Tensor:
"""
Returns:
The box centers in a Nx2 array of (x, y).
"""
return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2
def scale(self, scale_x: float, scale_y: float) -> None:
"""
Scale the box with horizontal and vertical scaling factors
"""
self.tensor[:, 0::2] *= scale_x
self.tensor[:, 1::2] *= scale_y
@classmethod
def cat(cls, boxes_list: List["Boxes"]) -> "Boxes":
"""
Concatenates a list of Boxes into a single Boxes
Arguments:
boxes_list (list[Boxes])
Returns:
Boxes: the concatenated Boxes
"""
assert isinstance(boxes_list, (list, tuple))
if len(boxes_list) == 0:
return cls(torch.empty(0))
assert all([isinstance(box, Boxes) for box in boxes_list])
# use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
return cat_boxes
@property
def device(self) -> device:
return self.tensor.device
# type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript
# https://github.com/pytorch/pytorch/issues/18627
@torch.jit.unused
def __iter__(self):
"""
Yield a box as a Tensor of shape (4,) at a time.
"""
yield from self.tensor
def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M,
compute the intersection area between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax)
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: intersection, sized [N,M].
"""
boxes1, boxes2 = boxes1.tensor, boxes2.tensor
width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
boxes1[:, None, :2], boxes2[:, :2]
) # [N,M,2]
width_height.clamp_(min=0) # [N,M,2]
intersection = width_height.prod(dim=2) # [N,M]
return intersection
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M, compute the IoU
(intersection over union) between **all** N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
area1 = boxes1.area() # [N]
area2 = boxes2.area() <fim_suffix>
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
iou = torch.where(
inter > 0,
inter / (area1[:, None] + area2 - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
return iou
def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Similar to :func:`pairwise_iou` but computes the IoA (intersection over boxes2 area).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoA, sized [N,M].
"""
area2 = boxes2.area() # [M]
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
ioa = torch.where(
inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device)
)
return ioa
def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes):
"""
Pairwise distance between N points and M boxes. The distance between a
point and a box is represented by the distance from the point to 4 edges
of the box. Distances are all positive when the point is inside the box.
Args:
points: Nx2 coordinates. Each row is (x, y)
boxes: M boxes
Returns:
Tensor: distances of size (N, M, 4). The 4 values are distances from
the point to the left, top, right, bottom of the box.
"""
x, y = points.unsqueeze(dim=2).unbind(dim=1) # (N, 1)
x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2) # (1, M)
return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2)
def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Compute pairwise intersection over union (IOU) of two sets of matched
boxes that have the same number of boxes.
Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix.
Args:
boxes1 (Boxes): bounding boxes, sized [N,4].
boxes2 (Boxes): same length as boxes1
Returns:
Tensor: iou, sized [N].
"""
assert len(boxes1) == len(
boxes2
), "boxlists should have the same" "number of entries, got {}, {}".format(
len(boxes1), len(boxes2)
)
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [N]
box1, box2 = boxes1.tensor, boxes2.tensor
lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2]
rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2]
wh = (rb - lt).clamp(min=0) # [N,2]
inter = wh[:, 0] * wh[:, 1] # [N]
iou = inter / (area1 + area2 - inter) # [N]
return iou
<fim_middle># [M] | # [M] | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
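The row above fills in a shape comment inside `pairwise_iou`. As a sanity check of the formula it documents (intersection over union with empty-box handling), here is a small stand-alone reimplementation on two tiny box sets; it mirrors the same algebra rather than importing the repo code:

import torch

def pairwise_iou_ref(b1: torch.Tensor, b2: torch.Tensor) -> torch.Tensor:
    """b1: Nx4, b2: Mx4 boxes in (x1, y1, x2, y2); returns an NxM IoU matrix."""
    area1 = (b1[:, 2] - b1[:, 0]) * (b1[:, 3] - b1[:, 1])  # [N]
    area2 = (b2[:, 2] - b2[:, 0]) * (b2[:, 3] - b2[:, 1])  # [M]
    wh = (torch.min(b1[:, None, 2:], b2[:, 2:])
          - torch.max(b1[:, None, :2], b2[:, :2])).clamp(min=0)  # [N,M,2]
    inter = wh.prod(dim=2)  # [N,M]
    return torch.where(inter > 0,
                       inter / (area1[:, None] + area2 - inter),
                       torch.zeros_like(inter))

b1 = torch.tensor([[0., 0., 2., 2.]])
b2 = torch.tensor([[1., 1., 3., 3.], [10., 10., 11., 11.]])
print(pairwise_iou_ref(b1, b2))  # tensor([[0.1429, 0.0000]])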
<filename>UniRef/detectron2/structures/boxes.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/structures/rotated_boxes.py
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx5 matrix. Each row is
(x_center, y_center, width, height, angle),
in which angle is represented in degrees.
While there's no strict range restriction for it,
the recommended principal range is between [-180, 180) degrees.
Assume we have a horizontal box B = (x_center, y_center, width, height),
where width is along the x-axis and height is along the y-axis.
The rotated box B_rot (x_center, y_center, width, height, angle)
can be seen as:
1. When angle == 0:
B_rot == B
2. When angle > 0:
B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW;
3. When angle < 0:
B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW.
Mathematically, since the right-handed coordinate system for image space
is (y, x), where y is top->down and x is left->right, the 4 vertices of the
rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from
the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4)
in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians,
:math:`(y_c, x_c)` is the center of the rectangle):
.. math::
yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c,
xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c,
which is the standard rigid-body rotation transformation.
Intuitively, the angle is
(1) the rotation angle from y-axis in image space
to the height vector (top->down in the box's local coordinate system)
of the box in CCW, and
(2) the rotation angle from x-axis in image space
to the width vector (left->right in the box's local coordinate system)
of the box in CCW.
More intuitively, consider the following horizontal box ABCD represented
in (x1, y1, x2, y2): (3, 2, 7, 4),
covering the [3, 7] x [2, 4] region of the continuous coordinate system
which looks like this:
.. code:: none
O--------> x
|
| A---B
| | |
| D---C
|
v y
Note that each capital letter represents one 0-dimensional geometric point
instead of a 'square pixel' here.
In the example above, using (x, y) to represent a point we have:
.. math::
O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4)
We name vector AB = vector DC as the width vector in box's local coordinate system, and
vector AD = vector BC as the height vector in box's local coordinate system. Initially,
when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis
in the image space, respectively.
For better illustration, we denote the center of the box as E,
.. code:: none
O--------> x
|
| A---B
| | E |
| D---C
|
v y
where the center E = ((3+7)/2, (2+4)/2) = (5, 3).
Also,
.. math::
width = |AB| = |CD| = 7 - 3 = 4,
height = |AD| = |BC| = 4 - 2 = 2.
Therefore, the corresponding representation for the same shape in rotated box in
(x_center, y_center, width, height, angle) format is:
(5, 3, 4, 2, 0),
Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees
CCW (counter-clockwise) by definition. It looks like this:
.. code:: none
O--------> x
| B-C
| | |
| |E|
| | |
| A-D
v y
The center E is still located at the same point (5, 3), while the vertices
ABCD are rotated by 90 degrees CCW with regard to E:
A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5)
Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to
vector AD or vector BC (the top->down height vector in box's local coordinate system),
or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right
width vector in box's local coordinate system).
.. math::
width = |AB| = |CD| = 5 - 1 = 4,
height = |AD| = |BC| = 6 - 4 = 2.
Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise)
by definition? It looks like this:
.. code:: none
O--------> x
| D-A
| | |
| |E|
| | |
| C-B
v y
The center E is still located at the same point (5, 3), while the vertices
ABCD are rotated by 90 degrees CW with regard to E:
A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1)
.. math::
width = |AB| = |CD| = 5 - 1 = 4,
height = |AD| = |BC| = 6 - 4 = 2.
This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU
will be 1. However, these two will generate different RoI Pooling results and
should not be treated as an identical box.
On the other hand, it's easy to see that (X, Y, W, H, A) is identical to
(X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be
identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is
equivalent to rotating the same shape 90 degrees CW.
We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180):
.. code:: none
O--------> x
|
| C---D
| | E |
| B---A
|
v y
.. math::
A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2),
width = |AB| = |CD| = 7 - 3 = 4,
height = |AD| = |BC| = 4 - 2 = 2.
Finally, this is a very inaccurate (heavily quantized) illustration of
what (5, 3, 4, 2, 60) looks like, in case anyone wonders:
.. code:: none
O--------> x
| B\
| / C
| /E /
| A /
| `D
v y
It's still a rectangle with center of (5, 3), width of 4 and height of 2,
but its angle (and thus orientation) is somewhere between
(5, 3, 4, 2, 0) and (5, 3, 4, 2, 90).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
# the inputs (and consequently confuses jit)
tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size()
self.tensor = tensor
# UniRef/detectron2/structures/masks.py
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
# UniRef/detectron2/modeling/anchor_generator.py
def _create_grid_offsets(size: List[int], stride: int, offset: float, device: torch.device):
grid_height, grid_width = size
shifts_x = torch.arange(
offset * stride, grid_width * stride, step=stride, dtype=torch.float32, device=device
)
shifts_y = torch.arange(
offset * stride, grid_height * stride, step=stride, dtype=torch.float32, device=device
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
return shift_x, shift_y
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import numpy as np
from enum import IntEnum, unique
from typing import List, Tuple, Union
import torch
from torch import device
_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
@unique
class BoxMode(IntEnum):
"""
Enum of different ways to represent a box.
"""
XYXY_ABS = 0
"""
(x0, y0, x1, y1) in absolute floating points coordinates.
The coordinates in range [0, width or height].
"""
XYWH_ABS = 1
"""
(x0, y0, w, h) in absolute floating points coordinates.
"""
XYXY_REL = 2
"""
Not yet supported!
(x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
"""
XYWH_REL = 3
"""
Not yet supported!
(x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
"""
XYWHA_ABS = 4
"""
(xc, yc, w, h, a) in absolute floating points coordinates.
(xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
"""
@staticmethod
def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType:
"""
Args:
box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
from_mode, to_mode (BoxMode)
Returns:
The converted box of the same type.
"""
if from_mode == to_mode:
return box
original_type = type(box)
is_numpy = isinstance(box, np.ndarray)
single_box = isinstance(box, (list, tuple))
if single_box:
assert len(box) == 4 or len(box) == 5, (
"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
" where k == 4 or 5"
)
arr = torch.tensor(box)[None, :]
else:
# avoid modifying the input box
if is_numpy:
arr = torch.from_numpy(np.asarray(box)).clone()
else:
arr = box.clone()
assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [
BoxMode.XYXY_REL,
BoxMode.XYWH_REL,
], "Relative mode not yet supported!"
if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
assert (
arr.shape[-1] == 5
), "The last dimension of input shape must be 5 for XYWHA format"
original_dtype = arr.dtype
arr = arr.double()
w = arr[:, 2]
h = arr[:, 3]
a = arr[:, 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
new_w = c * w + s * h
new_h = c * h + s * w
# convert center to top-left corner
arr[:, 0] -= new_w / 2.0
arr[:, 1] -= new_h / 2.0
# bottom-right corner
arr[:, 2] = arr[:, 0] + new_w
arr[:, 3] = arr[:, 1] + new_h
arr = arr[:, :4].to(dtype=original_dtype)
elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
original_dtype = arr.dtype
arr = arr.double()
arr[:, 0] += arr[:, 2] / 2.0
arr[:, 1] += arr[:, 3] / 2.0
angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
else:
if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
arr[:, 2] += arr[:, 0]
arr[:, 3] += arr[:, 1]
elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
arr[:, 2] -= arr[:, 0]
arr[:, 3] -= arr[:, 1]
else:
raise NotImplementedError(
"Conversion from BoxMode {} to {} is not supported yet".format(
from_mode, to_mode
)
)
if single_box:
return original_type(arr.flatten().tolist())
if is_numpy:
return arr.numpy()
else:
return arr
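# Illustrative usage note (added commentary, not part of the original detectron2
# file): converting a single XYWH_ABS box to XYXY_ABS with the method above turns
# (x0, y0, w, h) into (x0, y0, x0 + w, y0 + h). For example:
#
#   BoxMode.convert([10, 10, 4, 6], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
#   # -> [10, 10, 14, 16]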
class Boxes:
"""
This structure stores a list of boxes as a Nx4 torch.Tensor.
It supports some common methods about boxes
(`area`, `clip`, `nonempty`, etc),
and also behaves like a Tensor
(support indexing, `to(device)`, `.device`, and iteration over all boxes)
Attributes:
tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
<fim_suffix>
tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()
self.tensor = tensor
def clone(self) -> "Boxes":
"""
Clone the Boxes.
Returns:
Boxes
"""
return Boxes(self.tensor.clone())
def to(self, device: torch.device):
# Boxes are assumed float32 and do not support to(dtype)
return Boxes(self.tensor.to(device=device))
def area(self) -> torch.Tensor:
"""
Computes the area of all the boxes.
Returns:
torch.Tensor: a vector with areas of each box.
"""
box = self.tensor
area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
return area
def clip(self, box_size: Tuple[int, int]) -> None:
"""
Clip (in place) the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
Args:
box_size (height, width): The clipping box's size.
"""
assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!"
h, w = box_size
x1 = self.tensor[:, 0].clamp(min=0, max=w)
y1 = self.tensor[:, 1].clamp(min=0, max=h)
x2 = self.tensor[:, 2].clamp(min=0, max=w)
y2 = self.tensor[:, 3].clamp(min=0, max=h)
self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)
def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
"""
Find boxes that are non-empty.
        A box is considered empty if either of its sides is no larger than threshold.
Returns:
Tensor:
a binary vector which represents whether each box is empty
(False) or non-empty (True).
"""
box = self.tensor
widths = box[:, 2] - box[:, 0]
heights = box[:, 3] - box[:, 1]
keep = (widths > threshold) & (heights > threshold)
return keep
def __getitem__(self, item) -> "Boxes":
"""
Args:
item: int, slice, or a BoolTensor
Returns:
Boxes: Create a new :class:`Boxes` by indexing.
        The following usages are allowed:
1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned Boxes might share storage with this Boxes,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return Boxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item)
return Boxes(b)
def __len__(self) -> int:
return self.tensor.shape[0]
def __repr__(self) -> str:
return "Boxes(" + str(self.tensor) + ")"
def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
"""
Args:
box_size (height, width): Size of the reference box.
boundary_threshold (int): Boxes that extend beyond the reference box
boundary by more than boundary_threshold are considered "outside".
Returns:
a binary vector, indicating whether each box is inside the reference box.
"""
height, width = box_size
inds_inside = (
(self.tensor[..., 0] >= -boundary_threshold)
& (self.tensor[..., 1] >= -boundary_threshold)
& (self.tensor[..., 2] < width + boundary_threshold)
& (self.tensor[..., 3] < height + boundary_threshold)
)
return inds_inside
def get_centers(self) -> torch.Tensor:
"""
Returns:
The box centers in a Nx2 array of (x, y).
"""
return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2
def scale(self, scale_x: float, scale_y: float) -> None:
"""
Scale the box with horizontal and vertical scaling factors
"""
self.tensor[:, 0::2] *= scale_x
self.tensor[:, 1::2] *= scale_y
@classmethod
def cat(cls, boxes_list: List["Boxes"]) -> "Boxes":
"""
Concatenates a list of Boxes into a single Boxes
Arguments:
boxes_list (list[Boxes])
Returns:
Boxes: the concatenated Boxes
"""
assert isinstance(boxes_list, (list, tuple))
if len(boxes_list) == 0:
return cls(torch.empty(0))
assert all([isinstance(box, Boxes) for box in boxes_list])
# use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
return cat_boxes
@property
def device(self) -> device:
return self.tensor.device
# type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript
# https://github.com/pytorch/pytorch/issues/18627
@torch.jit.unused
def __iter__(self):
"""
Yield a box as a Tensor of shape (4,) at a time.
"""
yield from self.tensor
def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M,
compute the intersection area between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax)
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: intersection, sized [N,M].
"""
boxes1, boxes2 = boxes1.tensor, boxes2.tensor
width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
boxes1[:, None, :2], boxes2[:, :2]
) # [N,M,2]
width_height.clamp_(min=0) # [N,M,2]
intersection = width_height.prod(dim=2) # [N,M]
return intersection
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M, compute the IoU
(intersection over union) between **all** N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [M]
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
iou = torch.where(
inter > 0,
inter / (area1[:, None] + area2 - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
return iou
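# A minimal sketch of `pairwise_iou` on two 1-box lists (comment only; uses the
# Boxes class and pairwise_iou defined above):
#
#   b1 = Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0]]))
#   b2 = Boxes(torch.tensor([[5.0, 5.0, 15.0, 15.0]]))
#   pairwise_iou(b1, b2)   # intersection 25, union 100 + 100 - 25 = 175 -> tensor([[0.1429]])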
def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
    Similar to :func:`pairwise_iou` but computes the IoA (intersection over boxes2 area).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoA, sized [N,M].
"""
area2 = boxes2.area() # [M]
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
ioa = torch.where(
inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device)
)
return ioa
def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes):
"""
Pairwise distance between N points and M boxes. The distance between a
point and a box is represented by the distance from the point to 4 edges
of the box. Distances are all positive when the point is inside the box.
Args:
points: Nx2 coordinates. Each row is (x, y)
boxes: M boxes
Returns:
Tensor: distances of size (N, M, 4). The 4 values are distances from
the point to the left, top, right, bottom of the box.
"""
x, y = points.unsqueeze(dim=2).unbind(dim=1) # (N, 1)
x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2) # (1, M)
return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2)
def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Compute pairwise intersection over union (IOU) of two sets of matched
boxes that have the same number of boxes.
Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix.
Args:
boxes1 (Boxes): bounding boxes, sized [N,4].
boxes2 (Boxes): same length as boxes1
Returns:
Tensor: iou, sized [N].
"""
assert len(boxes1) == len(
boxes2
), "boxlists should have the same" "number of entries, got {}, {}".format(
len(boxes1), len(boxes2)
)
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [N]
box1, box2 = boxes1.tensor, boxes2.tensor
lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2]
rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2]
wh = (rb - lt).clamp(min=0) # [N,2]
inter = wh[:, 0] * wh[:, 1] # [N]
iou = inter / (area1 + area2 - inter) # [N]
return iou
<fim_middle># the inputs (and consequently confuses jit) | # the inputs (and consequently confuses jit) | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/export/caffe2_export.py
def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]):
"""
Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX.
Arg:
model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py
tensor_inputs: a list of tensors that caffe2 model takes as input.
"""
model = copy.deepcopy(model)
assert isinstance(model, torch.nn.Module)
assert hasattr(model, "encode_additional_info")
# Export via ONNX
logger.info(
"Exporting a {} model via ONNX ...".format(type(model).__name__)
+ " Some warnings from ONNX are expected and are usually not to worry about."
)
onnx_model = export_onnx_model(model, (tensor_inputs,))
# Convert ONNX model to Caffe2 protobuf
init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
ops_table = [[op.type, op.input, op.output] for op in predict_net.op]
table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe")
logger.info(
"ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan")
)
# Apply protobuf optimization
fuse_alias_placeholder(predict_net, init_net)
if any(t.device.type != "cpu" for t in tensor_inputs):
fuse_copy_between_cpu_and_gpu(predict_net)
remove_dead_end_ops(init_net)
_assign_device_option(predict_net, init_net, tensor_inputs)
params, device_options = get_params_from_init_net(init_net)
predict_net, params = remove_reshape_for_fc(predict_net, params)
init_net = construct_init_net_from_params(params, device_options)
group_norm_replace_aten_with_caffe2(predict_net)
# Record necessary information for running the pb model in Detectron2 system.
model.encode_additional_info(predict_net, init_net)
logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net)))
logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net)))
return predict_net, init_net
# UniRef/projects/UniRef/uniref/models/segment_anything/modeling/sam.py
def postprocess_masks(
self,
masks: torch.Tensor,
input_size: Tuple[int, ...],
original_size: Tuple[int, ...],
) -> torch.Tensor:
"""
Remove padding and upscale masks to the original image size.
Arguments:
masks (torch.Tensor): Batched masks from the mask_decoder,
in BxCxHxW format.
input_size (tuple(int, int)): The size of the image input to the
model, in (H, W) format. Used to remove padding.
original_size (tuple(int, int)): The original size of the image
before resizing for input to the model, in (H, W) format.
Returns:
(torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
is given by original_size.
"""
dtype = masks.dtype
masks = F.interpolate(
masks.float(),
(self.image_encoder.img_size, self.image_encoder.img_size),
mode="bilinear",
align_corners=False,
)
# masks = masks.to(dtype)
masks = masks[..., : input_size[0], : input_size[1]]
masks = F.interpolate(
masks, original_size, mode="bilinear", align_corners=False
)
return masks
# UniRef/detectron2/data/build.py
def load_proposals_into_dataset(dataset_dicts, proposal_file):
"""
Load precomputed object proposals into the dataset.
The proposal file should be a pickled dict with the following keys:
- "ids": list[int] or list[str], the image ids
- "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
- "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
corresponding to the boxes.
- "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
proposal_file (str): file path of pre-computed proposals, in pkl format.
Returns:
list[dict]: the same format as dataset_dicts, but added proposal field.
"""
logger = logging.getLogger(__name__)
logger.info("Loading proposals from: {}".format(proposal_file))
with PathManager.open(proposal_file, "rb") as f:
proposals = pickle.load(f, encoding="latin1")
# Rename the key names in D1 proposal files
rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
for key in rename_keys:
if key in proposals:
proposals[rename_keys[key]] = proposals.pop(key)
# Fetch the indexes of all proposals that are in the dataset
# Convert image_id to str since they could be int.
img_ids = set({str(record["image_id"]) for record in dataset_dicts})
id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids}
# Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS'
bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS
for record in dataset_dicts:
# Get the index of the proposal
i = id_to_index[str(record["image_id"])]
boxes = proposals["boxes"][i]
objectness_logits = proposals["objectness_logits"][i]
# Sort the proposals in descending order of the scores
inds = objectness_logits.argsort()[::-1]
record["proposal_boxes"] = boxes[inds]
record["proposal_objectness_logits"] = objectness_logits[inds]
record["proposal_bbox_mode"] = bbox_mode
return dataset_dicts
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
<fim_suffix>
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note the current matching is not symmetric.
# it assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state-dict, and returns a new chkpt_state_dict with names
converted to match model_state_dict with heuristics. The returned dict can be later
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
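    # e.g. match("backbone[0].body.res2.conv1.weight", "res2.conv1.weight") -> True, while
    # match("roi_heads.mesh_head.whatever_conv1", "conv1") -> False, per the rule above.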
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
m1, m2 = min(names), max(names)
ret = [a for a, b in zip(m1, m2) if a == b]
ret = ".".join(ret) + "." if len(ret) else ""
return ret
def _longest_common_prefix_str(names: List[str]) -> str:
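    # Character-level longest common prefix, e.g. ["conv1_w", "conv1_b"] -> "conv1_"
    # (used by `_group_keys_by_module` above and `_group_str` below).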
m1, m2 = min(names), max(names)
lcp = [a for a, b in zip(m1, m2) if a == b]
lcp = "".join(lcp)
return lcp
def _group_str(names: List[str]) -> str:
"""
Turn "common1", "common2", "common3" into "common{1,2,3}"
"""
lcp = _longest_common_prefix_str(names)
rest = [x[len(lcp) :] for x in names]
rest = "{" + ",".join(rest) + "}"
ret = lcp + rest
# add some simplification for BN specifically
ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
return ret
<fim_middle># RPN hidden representation conv | # RPN hidden representation conv | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/external/lvos-evaluation/lvos/metrics.py
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
Compute mean,recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F
# UniRef/external/lvos-evaluation/lvos/metrics.py
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
# UniRef/external/lvos-evaluation/lvos/metrics.py
def _seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <dmartin@eecs.berkeley.edu>
January 2003
"""
seg = seg.astype(np.bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
assert not (
width > w | height > h | abs(ar1 - ar2) > 0.01
), "Can" "t convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
for y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1
return bmap
"""
import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
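# A minimal sketch of `db_eval_iou` on two 4x4 binary masks (comment only):
#
#   a = np.zeros((4, 4), dtype=bool); a[:2, :2] = True    # 4 foreground pixels
#   b = np.zeros((4, 4), dtype=bool); b[1:3, 1:3] = True  # 4 foreground pixels
#   db_eval_iou(a, b)   # intersection 1, union 7 -> ~0.143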
def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008):
assert annotation.shape == segmentation.shape
if void_pixels is not None:
assert annotation.shape == void_pixels.shape
if annotation.ndim == 3:
n_frames = annotation.shape[0]
f_res = np.zeros(n_frames)
for frame_id in range(n_frames):
void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ]
f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th)
elif annotation.ndim == 2:
f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th)
else:
raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions')
return f_res
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
Compute mean,recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
<fim_suffix>
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
from skimage.morphology import disk
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F
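# Worked example of the F computation above (comment only): if precision = 0.5 and
# recall = 1.0, then F = 2 * 0.5 * 1.0 / (0.5 + 1.0) = 2/3 ≈ 0.667.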
def _seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <dmartin@eecs.berkeley.edu>
January 2003
"""
seg = seg.astype(np.bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
assert not (
width > w | height > h | abs(ar1 - ar2) > 0.01
), "Can" "t convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
for y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1
return bmap
if __name__ == '__main__':
from davis2017.davis import DAVIS
from davis2017.results import Results
dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics')
results = Results(root_dir='examples/osvos')
# Test timing F measure
for seq in dataset.get_sequences():
all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True)
all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
all_res_masks = results.read_masks(seq, all_masks_id)
f_metrics_res = np.zeros(all_gt_masks.shape[:2])
for ii in range(all_gt_masks.shape[0]):
f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...])
# Run using to profile code: python -m cProfile -o f_measure.prof metrics.py
# snakeviz f_measure.prof
<fim_middle># Get the pixel boundaries of both masks | # Get the pixel boundaries of both masks | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/utils/logger.py
def log_every_n_seconds(lvl, msg, n=1, *, name=None):
"""
Log no more than once per n seconds.
Args:
lvl (int): the logging level
msg (str):
n (int):
name (str): name of the logger to use. Will use the caller's module by default.
"""
caller_module, key = _find_caller()
last_logged = _LOG_TIMER.get(key, None)
current_time = time.time()
if last_logged is None or current_time - last_logged >= n:
logging.getLogger(name or caller_module).log(lvl, msg)
_LOG_TIMER[key] = current_time
# UniRef/detectron2/evaluation/coco_evaluation.py
def _derive_refcoco_results(self, coco_eval, iou_type):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
coco_eval (None or COCOEval): None represents no predictions from model.
iou_type (str):
class_names (None or list[str]): if provided, will use it to predict
per-category AP.
Returns:
a dict of {metric name: score}
"""
metrics = {"bbox": ["P@0.5", "P@0.6", "P@0.7", "P@0.8", "P@0.9", "oIoU", "mIoU"],
"segm": ["P@0.5", "P@0.6", "P@0.7", "P@0.8", "P@0.9", "oIoU", "mIoU"]
}[iou_type]
if coco_eval is None:
self._logger.warn("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float("nan")
for idx, metric in enumerate(metrics)
}
ious = np.array([v for (k, v) in coco_eval.ious.items()])
total_intersection_area = coco_eval.total_intersection_area
total_union_area = coco_eval.total_union_area
iou_list = coco_eval.iou_list
# compute metrics
results["P@0.5"] = np.sum(ious > 0.5) / len(ious) * 100
results["P@0.6"] = np.sum(ious > 0.6) / len(ious) * 100
results["P@0.7"] = np.sum(ious > 0.7) / len(ious) * 100
results["P@0.8"] = np.sum(ious > 0.8) / len(ious) * 100
results["P@0.9"] = np.sum(ious > 0.9) / len(ious) * 100
results["oIoU"] = total_intersection_area / total_union_area * 100
results["mIoU"] = np.mean(ious) * 100
# if iou_type == "bbox":
# results["P@0.5"] = np.sum(ious > 0.5) / len(ious) * 100
# elif iou_type == "segm":
# results["mIoU"] = np.mean(ious) * 100
# else:
# raise ValueError("Unsupported iou_type!")
self._logger.info(
"Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
)
# results.update({"AP-" + name: ap for name, ap in results_per_category})
return results
# UniRef/detectron2/data/build.py
def print_instances_class_histogram(dataset_dicts, class_names):
"""
Args:
dataset_dicts (list[dict]): list of dataset dicts.
class_names (list[str]): list of class names (zero-indexed).
"""
num_classes = len(class_names)
hist_bins = np.arange(num_classes + 1)
histogram = np.zeros((num_classes,), dtype=np.int)
for entry in dataset_dicts:
annos = entry["annotations"]
classes = np.asarray(
[x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=np.int
)
if len(classes):
assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}"
assert (
classes.max() < num_classes
), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes"
histogram += np.histogram(classes, bins=hist_bins)[0]
N_COLS = min(6, len(class_names) * 2)
def short_name(x):
# make long class names shorter. useful for lvis
if len(x) > 13:
return x[:11] + ".."
return x
data = list(
itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])
)
total_num_instances = sum(data[1::2])
data.extend([None] * (N_COLS - (len(data) % N_COLS)))
if num_classes > 1:
data.extend(["total", total_num_instances])
data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
data,
headers=["category", "#instances"] * (N_COLS // 2),
tablefmt="pipe",
numalign="left",
stralign="center",
)
log_first_n(
logging.INFO,
"Distribution of instances among all {} categories:\n".format(num_classes)
+ colored(table, "cyan"),
key="message",
)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note the current matching is not symmetric.
# it assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state-dict, and returns a new chkpt_state_dict with names
converted to match model_state_dict with heuristics. The returned dict can be later
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
<fim_suffix>
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
m1, m2 = min(names), max(names)
ret = [a for a, b in zip(m1, m2) if a == b]
ret = ".".join(ret) + "." if len(ret) else ""
return ret
def _longest_common_prefix_str(names: List[str]) -> str:
m1, m2 = min(names), max(names)
lcp = [a for a, b in zip(m1, m2) if a == b]
lcp = "".join(lcp)
return lcp
def _group_str(names: List[str]) -> str:
"""
Turn "common1", "common2", "common3" into "common{1,2,3}"
"""
lcp = _longest_common_prefix_str(names)
rest = [x[len(lcp) :] for x in names]
rest = "{" + ",".join(rest) + "}"
ret = lcp + rest
# add some simplification for BN specifically
ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
return ret
<fim_middle># ckpt_key string, if it matches | # ckpt_key string, if it matches | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
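A minimal runnable sketch of the suffix-match heuristic used by align_and_update_state_dicts above; the toy key names (toy_model_keys, toy_ckpt_keys) are made up for illustration and are not part of the repository code.

# Sketch of the suffix-match heuristic: a checkpoint key matches a model key when
# it equals the model key or is a '.'-delimited suffix of it; among several
# candidates, the longest checkpoint key wins.
def match(model_key: str, ckpt_key: str) -> bool:
    return model_key == ckpt_key or model_key.endswith("." + ckpt_key)

toy_model_keys = ["backbone.body.res2.conv1.weight", "backbone.body.stem.conv1.weight"]
toy_ckpt_keys = ["res2.conv1.weight", "conv1.weight"]

for mk in toy_model_keys:
    candidates = [ck for ck in toy_ckpt_keys if match(mk, ck)]
    best = max(candidates, key=len) if candidates else None
    print(mk, "->", best)
# backbone.body.res2.conv1.weight -> res2.conv1.weight
# backbone.body.stem.conv1.weight -> conv1.weight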
<filename>UniRef/detectron2/tracking/iou_weighted_hungarian_bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/vanilla_hungarian_bbox_iou_tracker.py
def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> np.ndarray:
"""
Based on IoU for each pair of bbox, assign the associated value in cost matrix
Args:
cost_matrix: np.ndarray, initialized 2D array with target dimensions
bbox_pairs: list of bbox pair, in each pair, iou value is stored
Return:
np.ndarray, cost_matrix with assigned values
"""
for pair in bbox_pairs:
# assign -1 for IoU above threshold pairs, algorithms will minimize cost
cost_matrix[pair["idx"]][pair["prev_idx"]] = -1
return cost_matrix
# UniRef/detectron2/tracking/utils.py
def create_prediction_pairs(
instances: Instances,
prev_instances: Instances,
iou_all: np.ndarray,
threshold: float = 0.5,
) -> List:
"""
Args:
instances: predictions from current frame
prev_instances: predictions from previous frame
iou_all: 2D numpy array containing iou for each bbox pair
threshold: below the threshold, the pair of bboxes is not considered valid
Return:
List of bbox pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(prev_instances)):
if iou_all[i, j] < threshold:
continue
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": prev_instances.ID_period[j],
}
)
return bbox_pairs
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
A list of IoU for all pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
from typing import List
import numpy as np
from .base_tracker import TRACKER_HEADS_REGISTRY
from .vanilla_hungarian_bbox_iou_tracker import VanillaHungarianBBoxIOUTracker
from detectron2.config import configurable, CfgNode as CfgNode_
@TRACKER_HEADS_REGISTRY.register()
class IOUWeightedHungarianBBoxIOUTracker(VanillaHungarianBBoxIOUTracker):
"""
A tracker using IoU as the weight in the Hungarian algorithm, also known
as the Munkres or Kuhn-Munkres algorithm
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of IDs allowed to be tracked
max_lost_frame_count: maximum number of frames an ID can lose tracking;
beyond this number, an ID is considered lost
forever
min_box_rel_dim: a percentage; a bbox whose relative dimension is smaller
than this is removed from tracking
min_instance_period: an instance will be shown after this number of periods
since it first shows up in the video
track_iou_threshold: IoU threshold; below this value a bbox pair is removed
from tracking
"""
super().__init__(
video_height=video_height,
video_width=video_width,
max_num_instances=max_num_instances,
max_lost_frame_count=max_lost_frame_count,
min_box_rel_dim=min_box_rel_dim,
min_instance_period=min_instance_period,
track_iou_threshold=track_iou_threshold
)
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker", # noqa
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> np.ndarray:
"""
Based on IoU for each pair of bbox, assign the associated value in cost matrix
Args:
cost_matrix: np.ndarray, initialized 2D array with target dimensions
bbox_pairs: list of bbox pair, in each pair, iou value is stored
Return:
np.ndarray, cost_matrix with assigned values
"""
for pair in bbox_pairs:
<fim_suffix>
cost_matrix[pair["idx"]][pair["prev_idx"]] = -1 * pair["IoU"]
return cost_matrix
<fim_middle># assign (-1 * IoU) for above threshold pairs, algorithms will minimize cost | # assign (-1 * IoU) for above threshold pairs, algorithms will minimize cost | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
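The tracker row above only fills the cost matrix with -1 * IoU; solving the assignment happens downstream in the base tracker. A hedged sketch of that step, assuming scipy is available; the 2x2 toy matrix is made up for illustration.

# Sketch: once the cost matrix holds -1 * IoU for overlapping pairs, the Hungarian
# (Kuhn-Munkres) solver minimizes total cost, i.e. maximizes total IoU.
import numpy as np
from scipy.optimize import linear_sum_assignment

cost_matrix = np.zeros((2, 2))      # rows: current boxes, cols: previous boxes
cost_matrix[0][1] = -0.9            # current box 0 overlaps previous box 1 (IoU 0.9)
cost_matrix[1][0] = -0.6            # current box 1 overlaps previous box 0 (IoU 0.6)

row_ind, col_ind = linear_sum_assignment(cost_matrix)
for i, j in zip(row_ind, col_ind):
    if cost_matrix[i, j] < 0:       # keep only pairs that actually overlapped
        print(f"current box {i} inherits the ID of previous box {j}")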
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/external/lvos-evaluation/lvos/metrics.py
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
# UniRef/external/lvos-evaluation/lvos/metrics.py
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
Compute mean,recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F
# UniRef/detectron2/layers/rotated_boxes.py
def pairwise_iou_rotated(boxes1, boxes2):
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in
(x_center, y_center, width, height, angle) format.
Arguments:
boxes1 (Tensor[N, 5])
boxes2 (Tensor[M, 5])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
return torch.ops.detectron2.box_iou_rotated(boxes1, boxes2)
"""
import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
<fim_suffix>
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008):
assert annotation.shape == segmentation.shape
if void_pixels is not None:
assert annotation.shape == void_pixels.shape
if annotation.ndim == 3:
n_frames = annotation.shape[0]
f_res = np.zeros(n_frames)
for frame_id in range(n_frames):
void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ]
f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th)
elif annotation.ndim == 2:
f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th)
else:
raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions')
return f_res
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
Compute mean,recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
from skimage.morphology import disk
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F
def _seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <dmartin@eecs.berkeley.edu>
January 2003
"""
seg = seg.astype(np.bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
assert not (
width > w or height > h or abs(ar1 - ar2) > 0.01
), "Can't convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
for y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1
return bmap
if __name__ == '__main__':
from davis2017.davis import DAVIS
from davis2017.results import Results
dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics')
results = Results(root_dir='examples/osvos')
# Test timing F measure
for seq in dataset.get_sequences():
all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True)
all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
all_res_masks = results.read_masks(seq, all_masks_id)
f_metrics_res = np.zeros(all_gt_masks.shape[:2])
for ii in range(all_gt_masks.shape[0]):
f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...])
# Run using to profile code: python -m cProfile -o f_measure.prof metrics.py
# snakeviz f_measure.prof
<fim_middle># Intersection between all sets | # Intersection between all sets | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
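A small self-contained example of the region similarity computed by db_eval_iou above, without the void-pixel handling; the two toy masks are made up for illustration.

# Sketch: Jaccard index (region similarity J) on two toy binary masks.
import numpy as np

annotation = np.array([[1, 1, 0],
                       [1, 1, 0],
                       [0, 0, 0]], dtype=bool)
segmentation = np.array([[1, 1, 1],
                         [1, 0, 0],
                         [0, 0, 0]], dtype=bool)

inters = np.sum(annotation & segmentation)   # 3 overlapping pixels
union = np.sum(annotation | segmentation)    # 5 pixels in the union
j = 1.0 if np.isclose(union, 0) else inters / union
print(f"J = {j:.2f}")                        # J = 0.60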
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/export/caffe2_export.py
def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]):
"""
Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX.
Arg:
model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py
tensor_inputs: a list of tensors that caffe2 model takes as input.
"""
model = copy.deepcopy(model)
assert isinstance(model, torch.nn.Module)
assert hasattr(model, "encode_additional_info")
# Export via ONNX
logger.info(
"Exporting a {} model via ONNX ...".format(type(model).__name__)
+ " Some warnings from ONNX are expected and are usually not to worry about."
)
onnx_model = export_onnx_model(model, (tensor_inputs,))
# Convert ONNX model to Caffe2 protobuf
init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
ops_table = [[op.type, op.input, op.output] for op in predict_net.op]
table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe")
logger.info(
"ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan")
)
# Apply protobuf optimization
fuse_alias_placeholder(predict_net, init_net)
if any(t.device.type != "cpu" for t in tensor_inputs):
fuse_copy_between_cpu_and_gpu(predict_net)
remove_dead_end_ops(init_net)
_assign_device_option(predict_net, init_net, tensor_inputs)
params, device_options = get_params_from_init_net(init_net)
predict_net, params = remove_reshape_for_fc(predict_net, params)
init_net = construct_init_net_from_params(params, device_options)
group_norm_replace_aten_with_caffe2(predict_net)
# Record necessary information for running the pb model in Detectron2 system.
model.encode_additional_info(predict_net, init_net)
logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net)))
logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net)))
return predict_net, init_net
# UniRef/projects/UniRef/uniref/models/segment_anything/modeling/sam.py
def postprocess_masks(
self,
masks: torch.Tensor,
input_size: Tuple[int, ...],
original_size: Tuple[int, ...],
) -> torch.Tensor:
"""
Remove padding and upscale masks to the original image size.
Arguments:
masks (torch.Tensor): Batched masks from the mask_decoder,
in BxCxHxW format.
input_size (tuple(int, int)): The size of the image input to the
model, in (H, W) format. Used to remove padding.
original_size (tuple(int, int)): The original size of the image
before resizing for input to the model, in (H, W) format.
Returns:
(torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
is given by original_size.
"""
dtype = masks.dtype
masks = F.interpolate(
masks.float(),
(self.image_encoder.img_size, self.image_encoder.img_size),
mode="bilinear",
align_corners=False,
)
# masks = masks.to(dtype)
masks = masks[..., : input_size[0], : input_size[1]]
masks = F.interpolate(
masks, original_size, mode="bilinear", align_corners=False
)
return masks
# UniRef/detectron2/data/build.py
def load_proposals_into_dataset(dataset_dicts, proposal_file):
"""
Load precomputed object proposals into the dataset.
The proposal file should be a pickled dict with the following keys:
- "ids": list[int] or list[str], the image ids
- "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
- "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
corresponding to the boxes.
- "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
proposal_file (str): file path of pre-computed proposals, in pkl format.
Returns:
list[dict]: the same format as dataset_dicts, but added proposal field.
"""
logger = logging.getLogger(__name__)
logger.info("Loading proposals from: {}".format(proposal_file))
with PathManager.open(proposal_file, "rb") as f:
proposals = pickle.load(f, encoding="latin1")
# Rename the key names in D1 proposal files
rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
for key in rename_keys:
if key in proposals:
proposals[rename_keys[key]] = proposals.pop(key)
# Fetch the indexes of all proposals that are in the dataset
# Convert image_id to str since they could be int.
img_ids = set({str(record["image_id"]) for record in dataset_dicts})
id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids}
# Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS'
bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS
for record in dataset_dicts:
# Get the index of the proposal
i = id_to_index[str(record["image_id"])]
boxes = proposals["boxes"][i]
objectness_logits = proposals["objectness_logits"][i]
# Sort the proposals in descending order of the scores
inds = objectness_logits.argsort()[::-1]
record["proposal_boxes"] = boxes[inds]
record["proposal_objectness_logits"] = objectness_logits[inds]
record["proposal_bbox_mode"] = bbox_mode
return dataset_dicts
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
<fim_suffix>
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note the current matching is not symmetric.
# it assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state dicts, and return a new ckpt_state_dict with names
converted to match model_state_dict with heuristics. The returned dict can be later
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry corresponds to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
m1, m2 = min(names), max(names)
ret = [a for a, b in zip(m1, m2) if a == b]
ret = ".".join(ret) + "." if len(ret) else ""
return ret
def _longest_common_prefix_str(names: List[str]) -> str:
m1, m2 = min(names), max(names)
lcp = [a for a, b in zip(m1, m2) if a == b]
lcp = "".join(lcp)
return lcp
def _group_str(names: List[str]) -> str:
"""
Turn "common1", "common2", "common3" into "common{1,2,3}"
"""
lcp = _longest_common_prefix_str(names)
rest = [x[len(lcp) :] for x in names]
rest = "{" + ",".join(rest) + "}"
ret = lcp + rest
# add some simplification for BN specifically
ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
return ret
<fim_middle># remove the meaningless prediction weight for background class | # remove the meaningless prediction weight for background class | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
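A short sketch of the background-class reshuffle performed for cls_score weights above: Caffe2 stores the background row first, while Detectron2 expects it last. The toy tensor shape is made up for illustration.

# Sketch: move the background-class row of a Caffe2 cls_score weight to the end.
import torch

num_fg = 3                                             # three foreground classes
c2_cls_score = torch.arange(num_fg + 1).float().unsqueeze(1).repeat(1, 5)  # (num_fg + 1) x 5
d2_cls_score = torch.cat([c2_cls_score[1:], c2_cls_score[:1]])             # bg row moved last

print(c2_cls_score[:, 0])   # tensor([0., 1., 2., 3.])  -> background first (Caffe2)
print(d2_cls_score[:, 0])   # tensor([1., 2., 3., 0.])  -> background last (Detectron2)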
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/config/lazy.py
def __call__(self, **kwargs):
if is_dataclass(self._target):
# omegaconf object cannot hold dataclass type
# https://github.com/omry/omegaconf/issues/784
target = _convert_target_to_string(self._target)
else:
target = self._target
kwargs["_target_"] = target
return DictConfig(content=kwargs, flags={"allow_objects": True})
# UniRef/detectron2/config/lazy.py
def __init__(self, target):
if not (callable(target) or isinstance(target, (str, abc.Mapping))):
raise TypeError(
f"target of LazyCall must be a callable or defines a callable! Got {target}"
)
self._target = target
# UniRef/detectron2/engine/launch.py
def _distributed_worker(
local_rank,
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout=DEFAULT_TIMEOUT,
):
assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
global_rank = machine_rank * num_gpus_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
timeout=timeout,
)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error("Process group URL: {}".format(dist_url))
raise e
# Setup the local process group (which contains ranks within the same machine)
assert comm._LOCAL_PROCESS_GROUP is None
num_machines = world_size // num_gpus_per_machine
for i in range(num_machines):
ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
pg = dist.new_group(ranks_on_i)
if i == machine_rank:
comm._LOCAL_PROCESS_GROUP = pg
assert num_gpus_per_machine <= torch.cuda.device_count()
torch.cuda.set_device(local_rank)
# synchronize is needed here to prevent a possible timeout after calling init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
main_func(*args)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
return cfg <fim_suffix>
<fim_middle># return as-is if don't know what to do | # return as-is if don't know what to do | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
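A stripped-down, standalone sketch of the "_target_" convention that instantiate() implements above, resolving dotted paths with importlib instead of detectron2's locate(); toy_instantiate and the Counter config are illustrative only.

# Sketch: recursively build objects from dicts that carry a "_target_" dotted path.
import importlib

def toy_instantiate(cfg):
    if isinstance(cfg, list):
        return [toy_instantiate(x) for x in cfg]
    if isinstance(cfg, dict) and "_target_" in cfg:
        cfg = {k: toy_instantiate(v) for k, v in cfg.items()}
        module_name, _, attr = cfg.pop("_target_").rpartition(".")
        cls = getattr(importlib.import_module(module_name), attr)
        return cls(**cfg)
    return cfg

cfg = {"_target_": "collections.Counter", "red": 2, "blue": 1}
print(toy_instantiate(cfg))   # Counter({'red': 2, 'blue': 1})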
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/export/caffe2_export.py
def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]):
"""
Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX.
Arg:
model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py
tensor_inputs: a list of tensors that caffe2 model takes as input.
"""
model = copy.deepcopy(model)
assert isinstance(model, torch.nn.Module)
assert hasattr(model, "encode_additional_info")
# Export via ONNX
logger.info(
"Exporting a {} model via ONNX ...".format(type(model).__name__)
+ " Some warnings from ONNX are expected and are usually not to worry about."
)
onnx_model = export_onnx_model(model, (tensor_inputs,))
# Convert ONNX model to Caffe2 protobuf
init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
ops_table = [[op.type, op.input, op.output] for op in predict_net.op]
table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe")
logger.info(
"ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan")
)
# Apply protobuf optimization
fuse_alias_placeholder(predict_net, init_net)
if any(t.device.type != "cpu" for t in tensor_inputs):
fuse_copy_between_cpu_and_gpu(predict_net)
remove_dead_end_ops(init_net)
_assign_device_option(predict_net, init_net, tensor_inputs)
params, device_options = get_params_from_init_net(init_net)
predict_net, params = remove_reshape_for_fc(predict_net, params)
init_net = construct_init_net_from_params(params, device_options)
group_norm_replace_aten_with_caffe2(predict_net)
# Record necessary information for running the pb model in Detectron2 system.
model.encode_additional_info(predict_net, init_net)
logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net)))
logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net)))
return predict_net, init_net
# UniRef/projects/UniRef/uniref/models/segment_anything/modeling/sam.py
def postprocess_masks(
self,
masks: torch.Tensor,
input_size: Tuple[int, ...],
original_size: Tuple[int, ...],
) -> torch.Tensor:
"""
Remove padding and upscale masks to the original image size.
Arguments:
masks (torch.Tensor): Batched masks from the mask_decoder,
in BxCxHxW format.
input_size (tuple(int, int)): The size of the image input to the
model, in (H, W) format. Used to remove padding.
original_size (tuple(int, int)): The original size of the image
before resizing for input to the model, in (H, W) format.
Returns:
(torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
is given by original_size.
"""
dtype = masks.dtype
masks = F.interpolate(
masks.float(),
(self.image_encoder.img_size, self.image_encoder.img_size),
mode="bilinear",
align_corners=False,
)
# masks = masks.to(dtype)
masks = masks[..., : input_size[0], : input_size[1]]
masks = F.interpolate(
masks, original_size, mode="bilinear", align_corners=False
)
return masks
# UniRef/detectron2/data/build.py
def load_proposals_into_dataset(dataset_dicts, proposal_file):
"""
Load precomputed object proposals into the dataset.
The proposal file should be a pickled dict with the following keys:
- "ids": list[int] or list[str], the image ids
- "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
- "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
corresponding to the boxes.
- "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
proposal_file (str): file path of pre-computed proposals, in pkl format.
Returns:
list[dict]: the same format as dataset_dicts, but added proposal field.
"""
logger = logging.getLogger(__name__)
logger.info("Loading proposals from: {}".format(proposal_file))
with PathManager.open(proposal_file, "rb") as f:
proposals = pickle.load(f, encoding="latin1")
# Rename the key names in D1 proposal files
rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
for key in rename_keys:
if key in proposals:
proposals[rename_keys[key]] = proposals.pop(key)
# Fetch the indexes of all proposals that are in the dataset
# Convert image_id to str since they could be int.
img_ids = set({str(record["image_id"]) for record in dataset_dicts})
id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids}
# Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS'
bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS
for record in dataset_dicts:
# Get the index of the proposal
i = id_to_index[str(record["image_id"])]
boxes = proposals["boxes"][i]
objectness_logits = proposals["objectness_logits"][i]
# Sort the proposals in descending order of the scores
inds = objectness_logits.argsort()[::-1]
record["proposal_boxes"] = boxes[inds]
record["proposal_objectness_logits"] = objectness_logits[inds]
record["proposal_bbox_mode"] = bbox_mode
return dataset_dicts
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
<fim_suffix>
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note the current matching is not symmetric.
# it assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state dicts, and return a new ckpt_state_dict with names
converted to match model_state_dict with heuristics. The returned dict can be later
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry corresponds to the length of the
# ckpt_key string if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
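# Illustrative note (added comment, not in the original file): if model_keys were
# ["backbone.res2.conv1.weight"] and ckpt_keys were ["conv1.weight", "res2.conv1.weight"],
# both checkpoint keys are valid suffixes of the model key; the row-wise max above keeps
# the longer match, "res2.conv1.weight".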
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret
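# Illustrative note (added comment, not in the original file): model keys
# "box_head.fc1.weight" and "box_head.fc1.bias" share the submodule prefix
# "box_head.fc1." and their checkpoint names ("fc6.weight", "fc6.bias") share
# the prefix "fc6.", so both keys end up mapped to the same group.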
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
m1, m2 = min(names), max(names)
ret = [a for a, b in zip(m1, m2) if a == b]
ret = ".".join(ret) + "." if len(ret) else ""
return ret
def _longest_common_prefix_str(names: List[str]) -> str:
m1, m2 = min(names), max(names)
lcp = [a for a, b in zip(m1, m2) if a == b]
lcp = "".join(lcp)
return lcp
def _group_str(names: List[str]) -> str:
"""
Turn "common1", "common2", "common3" into "common{1,2,3}"
"""
lcp = _longest_common_prefix_str(names)
rest = [x[len(lcp) :] for x in names]
rest = "{" + ",".join(rest) + "}"
ret = lcp + rest
# add some simplification for BN specifically
ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
return ret
<fim_middle># -------------------------------------------------------------------------- | # -------------------------------------------------------------------------- | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/structures/masks.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/projects/UniRef/uniref/util/box_ops.py
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
Returns a [N, 4] tensors, with the boxes in xyxy format
"""
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float, device=masks.device)
x = torch.arange(0, w, dtype=torch.float, device=masks.device)
y, x = torch.meshgrid(y, x)
x_mask = (masks * x.unsqueeze(0))
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = (masks * y.unsqueeze(0))
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
return torch.stack([x_min, y_min, x_max, y_max], 1)
# UniRef/projects/UniRef/uniref/models/deformable_detr/deformable_transformer.py
def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes):
N_, S_, C_ = memory.shape
base_scale = 4.0
proposals = []
_cur = 0
for lvl, (H_, W_) in enumerate(spatial_shapes):
mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)
proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
proposals.append(proposal)
_cur += (H_ * W_)
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
output_proposals = torch.log(output_proposals / (1 - output_proposals))
output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))
output_memory = memory
output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
output_memory = self.enc_output_norm(self.enc_output(output_memory))
return output_memory, output_proposals
# UniRef/detectron2/export/shared.py
def get_producer_map(ssa):
"""
Return dict from versioned blob to (i, j),
where i is index of producer op, j is the index of output of that op.
"""
producer_map = {}
for i in range(len(ssa)):
outputs = ssa[i][1]
for j, outp in enumerate(outputs):
producer_map[outp] = (i, j)
return producer_map
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
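# Worked example (added comment, not in the original file): for the unit square
# x = [0, 1, 1, 0], y = [0, 0, 1, 1], np.dot(x, np.roll(y, 1)) = 0 and
# np.dot(y, np.roll(x, 1)) = 2, so polygon_area returns 0.5 * |0 - 2| = 1.0.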
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(np.bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(np.bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
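# Usage sketch (added comment; the values below are illustrative, not from the original file):
#   square = [np.array([2., 2., 12., 2., 12., 12., 2., 12.])]
#   box = np.array([2., 2., 12., 12.])
#   target = rasterize_polygons_within_box(square, box, mask_size=28)
#   # `target` is a (28, 28) BoolTensor usable as a mask-head training target.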
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usages are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However, we observe no difference in accuracy,
while BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
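# Usage sketch (added comment; shapes are illustrative, not from the original file):
#   masks = BitMasks(torch.zeros(2, 480, 640, dtype=torch.bool))
#   boxes = torch.tensor([[10., 10., 110., 110.], [50., 50., 250., 250.]])
#   mask_targets = masks.crop_and_resize(boxes, mask_size=28)  # (2, 28, 28) bool tensor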
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zeros.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
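# Example (added comment, not in the original file): a mask that is True exactly on
# rows 2-4 and columns 3-7 of the image yields the box [3., 2., 8., 5.]; an all-False
# mask yields [0., 0., 0., 0.].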
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list corresponds to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around polygon masks.
"""
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
<fim_suffix>
return Boxes(boxes)
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor:
a BoolTensor which represents whether each mask is empty (False) or not (True).
"""
keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
return torch.from_numpy(np.asarray(keep, dtype=np.bool))
def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
"""
Support indexing over the instances and return a `PolygonMasks` object.
`item` can be:
1. An integer. It will return an object with only one instance.
2. A slice. It will return an object with the selected instances.
3. A list[int]. It will return an object with the selected instances,
corresponding to the indices in the list.
4. A vector mask of type BoolTensor, whose length is num_instances.
It will return an object with the instances whose mask is nonzero.
"""
if isinstance(item, int):
selected_polygons = [self.polygons[item]]
elif isinstance(item, slice):
selected_polygons = self.polygons[item]
elif isinstance(item, list):
selected_polygons = [self.polygons[i] for i in item]
elif isinstance(item, torch.Tensor):
# Polygons is a list, so we have to move the indices back to CPU.
if item.dtype == torch.bool:
assert item.dim() == 1, item.shape
item = item.nonzero().squeeze(1).cpu().numpy().tolist()
elif item.dtype in [torch.int32, torch.int64]:
item = item.cpu().numpy().tolist()
else:
raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
selected_polygons = [self.polygons[i] for i in item]
return PolygonMasks(selected_polygons)
def __iter__(self) -> Iterator[List[np.ndarray]]:
"""
Yields:
list[ndarray]: the polygons for one instance.
Each ndarray is a float64 vector representing a polygon.
"""
return iter(self.polygons)
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.polygons))
return s
def __len__(self) -> int:
return len(self.polygons)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each mask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor: A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = boxes.device
# Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
# (several small tensors for representing a single instance mask)
boxes = boxes.to(torch.device("cpu"))
results = [
rasterize_polygons_within_box(poly, box.numpy(), mask_size)
for poly, box in zip(self.polygons, boxes)
]
"""
poly: list[list[float]], the polygons for one instance
box: a tensor of shape (4,)
"""
if len(results) == 0:
return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
return torch.stack(results, dim=0).to(device=device)
def area(self):
"""
Computes area of the mask.
Only works with Polygons, using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Returns:
Tensor: a vector, area for each instance
"""
area = []
for polygons_per_instance in self.polygons:
area_per_instance = 0
for p in polygons_per_instance:
area_per_instance += polygon_area(p[0::2], p[1::2])
area.append(area_per_instance)
return torch.tensor(area)
@staticmethod
def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
"""
Concatenates a list of PolygonMasks into a single PolygonMasks
Arguments:
polymasks_list (list[PolygonMasks])
Returns:
PolygonMasks: the concatenated PolygonMasks
"""
assert isinstance(polymasks_list, (list, tuple))
assert len(polymasks_list) > 0
assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
cat_polymasks = type(polymasks_list[0])(
list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
)
return cat_polymasks
class ROIMasks:
"""
Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
the full-image bitmask can be obtained by "pasting" the mask on the region defined
by the corresponding ROI box.
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor: (N, M, M) mask tensor that defines the mask within each ROI.
"""
if tensor.dim() != 3:
raise ValueError("ROIMasks must take a masks of 3 dimension.")
self.tensor = tensor
def to(self, device: torch.device) -> "ROIMasks":
return ROIMasks(self.tensor.to(device))
@property
def device(self) -> device:
return self.tensor.device
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, item) -> "ROIMasks":
"""
Returns:
ROIMasks: Create a new :class:`ROIMasks` by indexing.
The following usages are allowed:
1. `new_masks = masks[2:10]`: return a slice of masks.
2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
t = self.tensor[item]
if t.dim() != 3:
raise ValueError(
f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
)
return ROIMasks(t)
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
@torch.jit.unused
def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
"""
Args: see documentation of :func:`paste_masks_in_image`.
"""
from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape
if torch.jit.is_tracing():
if isinstance(height, torch.Tensor):
paste_func = _paste_masks_tensor_shape
else:
paste_func = paste_masks_in_image
else:
paste_func = retry_if_cuda_oom(paste_masks_in_image)
bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold)
return BitMasks(bitmasks)
<fim_middle>for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy | for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/utils/logger.py
def log_every_n_seconds(lvl, msg, n=1, *, name=None):
"""
Log no more than once per n seconds.
Args:
lvl (int): the logging level
msg (str):
n (int):
name (str): name of the logger to use. Will use the caller's module by default.
"""
caller_module, key = _find_caller()
last_logged = _LOG_TIMER.get(key, None)
current_time = time.time()
if last_logged is None or current_time - last_logged >= n:
logging.getLogger(name or caller_module).log(lvl, msg)
_LOG_TIMER[key] = current_time
# UniRef/detectron2/evaluation/coco_evaluation.py
def _derive_refcoco_results(self, coco_eval, iou_type):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
coco_eval (None or COCOEval): None represents no predictions from model.
iou_type (str):
class_names (None or list[str]): if provided, will use it to predict
per-category AP.
Returns:
a dict of {metric name: score}
"""
metrics = {"bbox": ["P@0.5", "P@0.6", "P@0.7", "P@0.8", "P@0.9", "oIoU", "mIoU"],
"segm": ["P@0.5", "P@0.6", "P@0.7", "P@0.8", "P@0.9", "oIoU", "mIoU"]
}[iou_type]
if coco_eval is None:
self._logger.warn("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float("nan")
for idx, metric in enumerate(metrics)
}
ious = np.array([v for (k, v) in coco_eval.ious.items()])
total_intersection_area = coco_eval.total_intersection_area
total_union_area = coco_eval.total_union_area
iou_list = coco_eval.iou_list
# compute metrics
results["P@0.5"] = np.sum(ious > 0.5) / len(ious) * 100
results["P@0.6"] = np.sum(ious > 0.6) / len(ious) * 100
results["P@0.7"] = np.sum(ious > 0.7) / len(ious) * 100
results["P@0.8"] = np.sum(ious > 0.8) / len(ious) * 100
results["P@0.9"] = np.sum(ious > 0.9) / len(ious) * 100
results["oIoU"] = total_intersection_area / total_union_area * 100
results["mIoU"] = np.mean(ious) * 100
# if iou_type == "bbox":
# results["P@0.5"] = np.sum(ious > 0.5) / len(ious) * 100
# elif iou_type == "segm":
# results["mIoU"] = np.mean(ious) * 100
# else:
# raise ValueError("Unsupported iou_type!")
self._logger.info(
"Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
)
# results.update({"AP-" + name: ap for name, ap in results_per_category})
return results
# UniRef/detectron2/data/build.py
def print_instances_class_histogram(dataset_dicts, class_names):
"""
Args:
dataset_dicts (list[dict]): list of dataset dicts.
class_names (list[str]): list of class names (zero-indexed).
"""
num_classes = len(class_names)
hist_bins = np.arange(num_classes + 1)
histogram = np.zeros((num_classes,), dtype=np.int)
for entry in dataset_dicts:
annos = entry["annotations"]
classes = np.asarray(
[x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=np.int
)
if len(classes):
assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}"
assert (
classes.max() < num_classes
), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes"
histogram += np.histogram(classes, bins=hist_bins)[0]
N_COLS = min(6, len(class_names) * 2)
def short_name(x):
# make long class names shorter. useful for lvis
if len(x) > 13:
return x[:11] + ".."
return x
data = list(
itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])
)
total_num_instances = sum(data[1::2])
data.extend([None] * (N_COLS - (len(data) % N_COLS)))
if num_classes > 1:
data.extend(["total", total_num_instances])
data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
data,
headers=["category", "#instances"] * (N_COLS // 2),
tablefmt="pipe",
numalign="left",
stralign="center",
)
log_first_n(
logging.INFO,
"Distribution of instances among all {} categories:\n".format(num_classes)
+ colored(table, "cyan"),
key="message",
)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note that the current matching is not symmetric:
# it assumes model_state_dict will have the longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state dicts and return a new ckpt_state_dict with names
converted to match model_state_dict using heuristics. The returned dict can later be
loaded with the fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of their keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, for each model weight we look among all loaded keys for one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with the longest matching name.
For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry corresponds to the length of the
# ckpt_key string if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
<fim_suffix>
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
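# Minimal usage sketch (added comment; `model` and `ckpt` are assumed to exist):
#   aligned = align_and_update_state_dicts(model.state_dict(), ckpt, c2_conversion=False)
#   model.load_state_dict(aligned, strict=False)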
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
m1, m2 = min(names), max(names)
ret = [a for a, b in zip(m1, m2) if a == b]
ret = ".".join(ret) + "." if len(ret) else ""
return ret
def _longest_common_prefix_str(names: List[str]) -> str:
m1, m2 = min(names), max(names)
lcp = [a for a, b in zip(m1, m2) if a == b]
lcp = "".join(lcp)
return lcp
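# Example (added comment, not in the original file):
#   _longest_common_prefix_str(["bn_beta", "bn_gamma"]) == "bn_"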
def _group_str(names: List[str]) -> str:
"""
Turn "common1", "common2", "common3" into "common{1,2,3}"
"""
lcp = _longest_common_prefix_str(names)
rest = [x[len(lcp) :] for x in names]
rest = "{" + ",".join(rest) + "}"
ret = lcp + rest
# add some simplification for BN specifically
ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
return ret
<fim_middle>for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model | for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/structures/masks.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/projects/UniRef/uniref/util/box_ops.py
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
Returns a [N, 4] tensors, with the boxes in xyxy format
"""
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float, device=masks.device)
x = torch.arange(0, w, dtype=torch.float, device=masks.device)
y, x = torch.meshgrid(y, x)
x_mask = (masks * x.unsqueeze(0))
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = (masks * y.unsqueeze(0))
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
return torch.stack([x_min, y_min, x_max, y_max], 1)
# UniRef/detectron2/structures/boxes.py
def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M, compute the IoU
(intersection over union) between **all** N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [M]
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
iou = torch.where(
inter > 0,
inter / (area1[:, None] + area2 - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
return iou
# UniRef/detectron2/data/dataset_mapper.py
def __init__(
self,
is_train: bool,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
use_instance_mask: bool = False,
use_keypoint: bool = False,
instance_mask_format: str = "polygon",
keypoint_hflip_indices: Optional[np.ndarray] = None,
precomputed_proposal_topk: Optional[int] = None,
recompute_boxes: bool = False,
):
"""
NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
use_instance_mask: whether to process instance segmentation annotations, if available
use_keypoint: whether to process keypoint annotations if available
instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
masks into this format.
keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
precomputed_proposal_topk: if given, will load pre-computed
proposals from dataset_dict and keep the top k proposals for each image.
recompute_boxes: whether to overwrite bounding box annotations
by computing tight bounding boxes from instance mask annotations.
"""
if recompute_boxes:
assert use_instance_mask, "recompute_boxes requires instance masks"
# fmt: off
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.use_instance_mask = use_instance_mask
self.instance_mask_format = instance_mask_format
self.use_keypoint = use_keypoint
self.keypoint_hflip_indices = keypoint_hflip_indices
self.proposal_topk = precomputed_proposal_topk
self.recompute_boxes = recompute_boxes
# fmt: on
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(np.bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(np.bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usages are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However, we observe no difference in accuracy,
while BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zeros.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
<fim_suffix>
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
                level of the list corresponds to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around polygon masks.
"""
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy
return Boxes(boxes)
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor:
a BoolTensor which represents whether each mask is empty (False) or not (True).
"""
keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
        return torch.from_numpy(np.asarray(keep, dtype=bool))  # np.bool was removed in recent NumPy
def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
"""
Support indexing over the instances and return a `PolygonMasks` object.
`item` can be:
1. An integer. It will return an object with only one instance.
2. A slice. It will return an object with the selected instances.
3. A list[int]. It will return an object with the selected instances,
           corresponding to the indices in the list.
4. A vector mask of type BoolTensor, whose length is num_instances.
It will return an object with the instances whose mask is nonzero.
"""
if isinstance(item, int):
selected_polygons = [self.polygons[item]]
elif isinstance(item, slice):
selected_polygons = self.polygons[item]
elif isinstance(item, list):
selected_polygons = [self.polygons[i] for i in item]
elif isinstance(item, torch.Tensor):
# Polygons is a list, so we have to move the indices back to CPU.
if item.dtype == torch.bool:
assert item.dim() == 1, item.shape
item = item.nonzero().squeeze(1).cpu().numpy().tolist()
elif item.dtype in [torch.int32, torch.int64]:
item = item.cpu().numpy().tolist()
else:
raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
selected_polygons = [self.polygons[i] for i in item]
return PolygonMasks(selected_polygons)
def __iter__(self) -> Iterator[List[np.ndarray]]:
"""
Yields:
list[ndarray]: the polygons for one instance.
Each Tensor is a float64 vector representing a polygon.
"""
return iter(self.polygons)
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.polygons))
return s
def __len__(self) -> int:
return len(self.polygons)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each mask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor: A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = boxes.device
# Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
# (several small tensors for representing a single instance mask)
boxes = boxes.to(torch.device("cpu"))
results = [
rasterize_polygons_within_box(poly, box.numpy(), mask_size)
for poly, box in zip(self.polygons, boxes)
]
"""
poly: list[list[float]], the polygons for one instance
box: a tensor of shape (4,)
"""
if len(results) == 0:
return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
return torch.stack(results, dim=0).to(device=device)
def area(self):
"""
Computes area of the mask.
Only works with Polygons, using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Returns:
Tensor: a vector, area for each instance
"""
area = []
for polygons_per_instance in self.polygons:
area_per_instance = 0
for p in polygons_per_instance:
area_per_instance += polygon_area(p[0::2], p[1::2])
area.append(area_per_instance)
return torch.tensor(area)
@staticmethod
def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
"""
Concatenates a list of PolygonMasks into a single PolygonMasks
Arguments:
polymasks_list (list[PolygonMasks])
Returns:
PolygonMasks: the concatenated PolygonMasks
"""
assert isinstance(polymasks_list, (list, tuple))
assert len(polymasks_list) > 0
assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
cat_polymasks = type(polymasks_list[0])(
list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
)
return cat_polymasks
class ROIMasks:
"""
Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
full-image bitmask can be obtained by "pasting" the mask on the region defined
by the corresponding ROI box.
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor: (N, M, M) mask tensor that defines the mask within each ROI.
"""
if tensor.dim() != 3:
raise ValueError("ROIMasks must take a masks of 3 dimension.")
self.tensor = tensor
def to(self, device: torch.device) -> "ROIMasks":
return ROIMasks(self.tensor.to(device))
@property
def device(self) -> device:
return self.tensor.device
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, item) -> "ROIMasks":
"""
Returns:
ROIMasks: Create a new :class:`ROIMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[2:10]`: return a slice of masks.
2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
        subject to PyTorch's indexing semantics.
"""
t = self.tensor[item]
if t.dim() != 3:
raise ValueError(
f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
)
return ROIMasks(t)
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
@torch.jit.unused
def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
"""
Args: see documentation of :func:`paste_masks_in_image`.
"""
from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape
if torch.jit.is_tracing():
if isinstance(height, torch.Tensor):
paste_func = _paste_masks_tensor_shape
else:
paste_func = paste_masks_in_image
else:
paste_func = retry_if_cuda_oom(paste_masks_in_image)
bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold)
return BitMasks(bitmasks)
<fim_middle>for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
) | for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
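As an aside on the block completed above: the tight-box extraction can be exercised on its own with plain torch. A minimal sketch, assuming only torch is installed; the toy masks and the helper name tight_boxes_from_bitmasks are made up for illustration and are not part of detectron2.

import torch

def tight_boxes_from_bitmasks(masks: torch.Tensor) -> torch.Tensor:
    # masks: (N, H, W) boolean tensor; returns (N, 4) boxes as (x0, y0, x1, y1)
    boxes = torch.zeros(masks.shape[0], 4, dtype=torch.float32)
    x_any = torch.any(masks, dim=1)  # (N, W): columns containing any foreground pixel
    y_any = torch.any(masks, dim=2)  # (N, H): rows containing any foreground pixel
    for idx in range(masks.shape[0]):
        x = torch.where(x_any[idx, :])[0]
        y = torch.where(y_any[idx, :])[0]
        if len(x) > 0 and len(y) > 0:
            boxes[idx, :] = torch.as_tensor(
                [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
            )
    return boxes  # empty masks keep an all-zero box

masks = torch.zeros(2, 8, 8, dtype=torch.bool)
masks[0, 2:5, 3:7] = True  # one small rectangle; mask 1 stays empty
print(tight_boxes_from_bitmasks(masks))  # first box -> [3., 2., 7., 5.], second -> zeros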
<filename>UniRef/detectron2/tracking/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
        pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
            A list of bbox pairs with their IoU values
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
# UniRef/detectron2/tracking/vanilla_hungarian_bbox_iou_tracker.py
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
"""
Build the cost matrix for assignment problem
(https://en.wikipedia.org/wiki/Assignment_problem)
Args:
instances: D2 Instances, for current frame predictions
prev_instances: D2 Instances, for previous frame predictions
Return:
the cost matrix in numpy array
"""
assert instances is not None and prev_instances is not None
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
bbox_pairs = create_prediction_pairs(
instances,
self._prev_instances,
iou_all,
threshold=self._track_iou_threshold
)
# assign large cost value to make sure pair below IoU threshold won't be matched
cost_matrix = np.full((len(instances), len(prev_instances)), LARGE_COST_VALUE)
return self.assign_cost_matrix_values(cost_matrix, bbox_pairs)
# UniRef/detectron2/tracking/iou_weighted_hungarian_bbox_iou_tracker.py
def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> np.ndarray:
"""
Based on IoU for each pair of bbox, assign the associated value in cost matrix
Args:
cost_matrix: np.ndarray, initialized 2D array with target dimensions
bbox_pairs: list of bbox pair, in each pair, iou value is stored
Return:
np.ndarray, cost_matrix with assigned values
"""
for pair in bbox_pairs:
# assign (-1 * IoU) for above threshold pairs, algorithms will minimize cost
cost_matrix[pair["idx"]][pair["prev_idx"]] = -1 * pair["IoU"]
return cost_matrix
"""
#!/usr/bin/env python3
from detectron2.structures import Instances
import numpy as np
from typing import List
def create_prediction_pairs(
instances: Instances,
prev_instances: Instances,
iou_all: np.ndarray,
threshold: float = 0.5,
) -> List:
"""
Args:
instances: predictions from current frame
prev_instances: predictions from previous frame
iou_all: 2D numpy array containing iou for each bbox pair
        threshold: pairs of bboxes with IoU below this threshold are not considered valid
Return:
List of bbox pairs
"""
bbox_pairs = []
for i in range(len(instances)):
<fim_suffix>
return bbox_pairs
LARGE_COST_VALUE = 100000
<fim_middle>for j in range(len(prev_instances)):
if iou_all[i, j] < threshold:
continue
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": prev_instances.ID_period[j],
}
) | for j in range(len(prev_instances)):
if iou_all[i, j] < threshold:
continue
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": prev_instances.ID_period[j],
}
) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
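The thresholded pair building completed above also has a compact vectorized counterpart; a minimal NumPy sketch with made-up IoU values, ids, and periods (none of these numbers come from the repository):

import numpy as np

iou_all = np.array([[0.80, 0.10, 0.00],
                    [0.20, 0.65, 0.55]])  # rows: current detections, columns: previous detections
prev_ids = [7, 9, 12]       # illustrative previous-frame track ids
prev_periods = [3, 1, 2]    # illustrative ID_period values
threshold = 0.5

# np.argwhere enumerates exactly the (i, j) pairs the nested loops keep
keep = np.argwhere(iou_all >= threshold)
bbox_pairs = [
    {
        "idx": int(i),
        "prev_idx": int(j),
        "prev_id": prev_ids[j],
        "IoU": float(iou_all[i, j]),
        "prev_period": prev_periods[j],
    }
    for i, j in keep
]
print(bbox_pairs)  # pairs (0, 0), (1, 1) and (1, 2) survive the threshold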
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/utils.py
def create_prediction_pairs(
instances: Instances,
prev_instances: Instances,
iou_all: np.ndarray,
threshold: float = 0.5,
) -> List:
"""
Args:
instances: predictions from current frame
prev_instances: predictions from previous frame
iou_all: 2D numpy array containing iou for each bbox pair
        threshold: pairs of bboxes with IoU below this threshold are not considered valid
Return:
List of bbox pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(prev_instances)):
if iou_all[i, j] < threshold:
continue
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": prev_instances.ID_period[j],
}
)
return bbox_pairs
# UniRef/detectron2/tracking/iou_weighted_hungarian_bbox_iou_tracker.py
def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> np.ndarray:
"""
Based on IoU for each pair of bbox, assign the associated value in cost matrix
Args:
cost_matrix: np.ndarray, initialized 2D array with target dimensions
bbox_pairs: list of bbox pair, in each pair, iou value is stored
Return:
np.ndarray, cost_matrix with assigned values
"""
for pair in bbox_pairs:
# assign (-1 * IoU) for above threshold pairs, algorithms will minimize cost
cost_matrix[pair["idx"]][pair["prev_idx"]] = -1 * pair["IoU"]
return cost_matrix
# UniRef/detectron2/tracking/vanilla_hungarian_bbox_iou_tracker.py
def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> np.ndarray:
"""
Based on IoU for each pair of bbox, assign the associated value in cost matrix
Args:
cost_matrix: np.ndarray, initialized 2D array with target dimensions
bbox_pairs: list of bbox pair, in each pair, iou value is stored
Return:
np.ndarray, cost_matrix with assigned values
"""
for pair in bbox_pairs:
# assign -1 for IoU above threshold pairs, algorithms will minimize cost
cost_matrix[pair["idx"]][pair["prev_idx"]] = -1
return cost_matrix
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
            video_height: height of the video frame
            video_width: width of the video frame
            max_num_instances: maximum number of ids allowed to be tracked
            max_lost_frame_count: maximum number of frames an id can lose tracking;
                                  beyond this number, an id is considered lost
                                  forever
            min_box_rel_dim: a percentage; a bbox smaller than this relative
                             dimension is removed from tracking
            min_instance_period: an instance is shown only after being tracked for
                                 this number of periods since it first appears in the video
            track_iou_threshold: IoU threshold; below this value a bbox pair is removed
                                 from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
            # build candidate pairs for all (current, previous) bbox combinations
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
        pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
            A list of bbox pairs with their IoU values
"""
bbox_pairs = []
for i in range(len(instances)):
<fim_suffix>
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
"""
        Before each update call, reset fields first
"""
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain condition, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging current instances and instances from previous
frame decided to keep tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
) | for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
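The iou_all matrix consumed by _create_prediction_pairs is produced by detectron2's pairwise_iou; a standalone NumPy sketch of that computation for (x0, y0, x1, y1) boxes, with made-up coordinates and the helper name pairwise_iou_np chosen purely for illustration:

import numpy as np

def pairwise_iou_np(boxes1: np.ndarray, boxes2: np.ndarray) -> np.ndarray:
    # IoU between every box in boxes1 (N, 4) and boxes2 (M, 4), boxes given as (x0, y0, x1, y1)
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    lt = np.maximum(boxes1[:, None, :2], boxes2[None, :, :2])  # top-left of the intersection
    rb = np.minimum(boxes1[:, None, 2:], boxes2[None, :, 2:])  # bottom-right of the intersection
    wh = np.clip(rb - lt, a_min=0, a_max=None)
    inter = wh[:, :, 0] * wh[:, :, 1]
    union = area1[:, None] + area2[None, :] - inter
    return inter / np.maximum(union, 1e-12)

curr = np.array([[0.0, 0.0, 10.0, 10.0], [20.0, 20.0, 30.0, 30.0]])
prev = np.array([[0.0, 0.0, 10.0, 10.0], [25.0, 25.0, 35.0, 35.0]])
print(pairwise_iou_np(curr, prev))  # diagonal roughly [1.0, 0.14]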
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/hungarian_tracker.py
def update(self, instances: Instances) -> Instances:
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
self._untracked_prev_idx = set(range(len(self._prev_instances)))
cost_matrix = self.build_cost_matrix(instances, self._prev_instances)
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx)
instances = self._process_unmatched_idx(instances, matched_idx)
instances = self._process_unmatched_prev_idx(instances, matched_prev_idx)
self._prev_instances = copy.deepcopy(instances)
return instances
# UniRef/detectron2/tracking/vanilla_hungarian_bbox_iou_tracker.py
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
"""
Build the cost matrix for assignment problem
(https://en.wikipedia.org/wiki/Assignment_problem)
Args:
instances: D2 Instances, for current frame predictions
prev_instances: D2 Instances, for previous frame predictions
Return:
the cost matrix in numpy array
"""
assert instances is not None and prev_instances is not None
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
bbox_pairs = create_prediction_pairs(
instances,
self._prev_instances,
iou_all,
threshold=self._track_iou_threshold
)
# assign large cost value to make sure pair below IoU threshold won't be matched
cost_matrix = np.full((len(instances), len(prev_instances)), LARGE_COST_VALUE)
return self.assign_cost_matrix_values(cost_matrix, bbox_pairs)
# UniRef/detectron2/tracking/utils.py
def create_prediction_pairs(
instances: Instances,
prev_instances: Instances,
iou_all: np.ndarray,
threshold: float = 0.5,
) -> List:
"""
Args:
instances: predictions from current frame
prev_instances: predictions from previous frame
iou_all: 2D numpy array containing iou for each bbox pair
        threshold: pairs of bboxes with IoU below this threshold are not considered valid
Return:
List of bbox pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(prev_instances)):
if iou_all[i, j] < threshold:
continue
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": prev_instances.ID_period[j],
}
)
return bbox_pairs
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
            video_height: height of the video frame
            video_width: width of the video frame
            max_num_instances: maximum number of ids allowed to be tracked
            max_lost_frame_count: maximum number of frames an id can lose tracking;
                                  beyond this number, an id is considered lost
                                  forever
            min_box_rel_dim: a percentage; a bbox smaller than this relative
                             dimension is removed from tracking
            min_instance_period: an instance is shown only after being tracked for
                                 this number of periods since it first appears in the video
            track_iou_threshold: IoU threshold; below this value a bbox pair is removed
                                 from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
            # build candidate pairs for all (current, previous) bbox combinations
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
<fim_suffix>
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
        pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
            A list of bbox pairs with their IoU values
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
"""
        Before each update call, reset fields first
"""
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain condition, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging current instances and instances from previous
frame decided to keep tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) | for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
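The greedy matching loop completed above can be summarized in isolation; a small sketch with made-up candidate pairs. The explicit sort by IoU is added here only to make the greedy choice visible; the tracker itself consumes the pairs in the order _create_prediction_pairs emits them.

track_iou_threshold = 0.5
bbox_pairs = [  # made-up candidates: idx = current detection, prev_id = previous track id
    {"idx": 0, "prev_idx": 0, "prev_id": 7, "IoU": 0.9, "prev_period": 4},
    {"idx": 1, "prev_idx": 0, "prev_id": 7, "IoU": 0.7, "prev_period": 4},
    {"idx": 1, "prev_idx": 1, "prev_id": 9, "IoU": 0.6, "prev_period": 2},
    {"idx": 2, "prev_idx": 1, "prev_id": 9, "IoU": 0.3, "prev_period": 2},
]

matched_idx, matched_id = set(), set()
assignments = {}  # current detection index -> (kept track id, new ID_period)
for pair in sorted(bbox_pairs, key=lambda p: p["IoU"], reverse=True):
    if (pair["idx"] in matched_idx
            or pair["prev_id"] in matched_id
            or pair["IoU"] < track_iou_threshold):
        continue
    assignments[pair["idx"]] = (pair["prev_id"], pair["prev_period"] + 1)
    matched_idx.add(pair["idx"])
    matched_id.add(pair["prev_id"])
print(assignments)  # {0: (7, 5), 1: (9, 3)}; detection 2 stays unmatched (IoU below threshold)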
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
            # build candidate pairs for all (current, previous) bbox combinations
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
# UniRef/detectron2/data/transforms/augmentation_impl.py
def get_transform(self, image):
h, w = image.shape[:2]
croph, cropw = self.get_crop_size((h, w))
assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self)
h0 = np.random.randint(h - croph + 1)
w0 = np.random.randint(w - cropw + 1)
return CropTransform(w0, h0, cropw, croph)
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
import numpy as np
import torch
from detectron2.structures import Boxes, Instances
from .base_tracker import BaseTracker
from scipy.optimize import linear_sum_assignment
from ..config.config import CfgNode as CfgNode_
from typing import Dict
from detectron2.config import configurable
class BaseHungarianTracker(BaseTracker):
"""
A base class for all Hungarian trackers
"""
@configurable
def __init__(
self,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
**kwargs
):
"""
Args:
            video_height: height of the video frame
            video_width: width of the video frame
            max_num_instances: maximum number of ids allowed to be tracked
            max_lost_frame_count: maximum number of frames an id can lose tracking;
                                  beyond this number, an id is considered lost
                                  forever
            min_box_rel_dim: a percentage; a bbox smaller than this relative
                             dimension is removed from tracking
            min_instance_period: an instance is shown only after being tracked for
                                 this number of periods since it first appears in the video
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
@classmethod
def from_config(cls, cfg: CfgNode_) -> Dict:
raise NotImplementedError("Calling HungarianTracker::from_config")
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
raise NotImplementedError("Calling HungarianTracker::build_matrix")
def update(self, instances: Instances) -> Instances:
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
self._untracked_prev_idx = set(range(len(self._prev_instances)))
cost_matrix = self.build_cost_matrix(instances, self._prev_instances)
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx)
instances = self._process_unmatched_idx(instances, matched_idx)
instances = self._process_unmatched_prev_idx(instances, matched_prev_idx)
self._prev_instances = copy.deepcopy(instances)
return instances
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _process_matched_idx(
self,
instances: Instances,
matched_idx: np.ndarray,
matched_prev_idx: np.ndarray
) -> Instances:
assert matched_idx.size == matched_prev_idx.size
<fim_suffix>
return instances
def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances:
untracked_idx = set(range(len(instances))).difference(set(matched_idx))
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>for i in range(matched_idx.size):
instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]]
instances.ID_period[matched_idx[i]] = \
self._prev_instances.ID_period[matched_prev_idx[i]] + 1
instances.lost_frame_count[matched_idx[i]] = 0 | for i in range(matched_idx.size):
instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]]
instances.ID_period[matched_idx[i]] = \
self._prev_instances.ID_period[matched_prev_idx[i]] + 1
instances.lost_frame_count[matched_idx[i]] = 0 | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
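_process_matched_idx consumes the output of scipy's linear_sum_assignment on a cost matrix built by the subclasses; a minimal end-to-end sketch with a made-up IoU matrix, assuming scipy is installed:

import numpy as np
from scipy.optimize import linear_sum_assignment

LARGE_COST_VALUE = 100000  # sentinel cost used for pairs below the IoU threshold
iou_all = np.array([[0.80, 0.10],
                    [0.20, 0.65],
                    [0.05, 0.40]])  # made up: 3 current detections (rows) x 2 previous tracks (columns)
threshold = 0.5

cost_matrix = np.full(iou_all.shape, LARGE_COST_VALUE, dtype=np.float64)
valid = iou_all >= threshold
cost_matrix[valid] = -iou_all[valid]  # minimizing -IoU is the same as maximizing IoU

matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
for i, j in zip(matched_idx, matched_prev_idx):
    # in the tracker these indices drive the ID / ID_period / lost_frame_count updates
    print(f"current {i} <-> previous {j}, IoU={iou_all[i, j]:.2f}")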
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/utils.py
def create_prediction_pairs(
instances: Instances,
prev_instances: Instances,
iou_all: np.ndarray,
threshold: float = 0.5,
) -> List:
"""
Args:
instances: predictions from current frame
prev_instances: predictions from previous frame
iou_all: 2D numpy array containing iou for each bbox pair
        threshold: pairs of bboxes with IoU below this threshold are not considered valid
Return:
List of bbox pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(prev_instances)):
if iou_all[i, j] < threshold:
continue
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": prev_instances.ID_period[j],
}
)
return bbox_pairs
# UniRef/detectron2/tracking/iou_weighted_hungarian_bbox_iou_tracker.py
def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> np.ndarray:
"""
Based on IoU for each pair of bbox, assign the associated value in cost matrix
Args:
cost_matrix: np.ndarray, initialized 2D array with target dimensions
bbox_pairs: list of bbox pair, in each pair, iou value is stored
Return:
np.ndarray, cost_matrix with assigned values
"""
for pair in bbox_pairs:
# assign (-1 * IoU) for above threshold pairs, algorithms will minimize cost
cost_matrix[pair["idx"]][pair["prev_idx"]] = -1 * pair["IoU"]
return cost_matrix
# UniRef/detectron2/tracking/vanilla_hungarian_bbox_iou_tracker.py
def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> np.ndarray:
"""
Based on IoU for each pair of bbox, assign the associated value in cost matrix
Args:
cost_matrix: np.ndarray, initialized 2D array with target dimensions
bbox_pairs: list of bbox pair, in each pair, iou value is stored
Return:
np.ndarray, cost_matrix with assigned values
"""
for pair in bbox_pairs:
# assign -1 for IoU above threshold pairs, algorithms will minimize cost
cost_matrix[pair["idx"]][pair["prev_idx"]] = -1
return cost_matrix
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
from typing import List
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances
from detectron2.structures.boxes import pairwise_iou
from ..config.config import CfgNode as CfgNode_
from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY
@TRACKER_HEADS_REGISTRY.register()
class BBoxIOUTracker(BaseTracker):
"""
A bounding box tracker to assign ID based on IoU between current and previous instances
"""
@configurable
def __init__(
self,
*,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
track_iou_threshold: float = 0.5,
**kwargs
):
"""
Args:
            video_height: height of the video frame
            video_width: width of the video frame
            max_num_instances: maximum number of ids allowed to be tracked
            max_lost_frame_count: maximum number of frames an id can lose tracking;
                                  beyond this number, an id is considered lost
                                  forever
            min_box_rel_dim: a percentage; a bbox smaller than this relative
                             dimension is removed from tracking
            min_instance_period: an instance is shown only after being tracked for
                                 this number of periods since it first appears in the video
            track_iou_threshold: IoU threshold; below this value a bbox pair is removed
                                 from tracking
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
self._track_iou_threshold = track_iou_threshold
@classmethod
def from_config(cls, cfg: CfgNode_):
"""
Old style initialization using CfgNode
Args:
cfg: D2 CfgNode, config file
Return:
dictionary storing arguments for __init__ method
"""
assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS
assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS
video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT")
video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH")
max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200)
max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0)
min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02)
min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1)
track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5)
return {
"_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker",
"video_height": video_height,
"video_width": video_width,
"max_num_instances": max_num_instances,
"max_lost_frame_count": max_lost_frame_count,
"min_box_rel_dim": min_box_rel_dim,
"min_instance_period": min_instance_period,
"track_iou_threshold": track_iou_threshold
}
def update(self, instances: Instances) -> Instances:
"""
See BaseTracker description
"""
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
# calculate IoU of all bbox pairs
iou_all = pairwise_iou(
boxes1=instances.pred_boxes,
boxes2=self._prev_instances.pred_boxes,
)
            # build candidate pairs for all (current, previous) bbox combinations
bbox_pairs = self._create_prediction_pairs(instances, iou_all)
# assign previous ID to current bbox if IoU > track_iou_threshold
self._reset_fields()
for bbox_pair in bbox_pairs:
idx = bbox_pair["idx"]
prev_id = bbox_pair["prev_id"]
if idx in self._matched_idx \
or prev_id in self._matched_ID \
or bbox_pair["IoU"] < self._track_iou_threshold:
continue
instances.ID[idx] = prev_id
instances.ID_period[idx] = bbox_pair["prev_period"] + 1
instances.lost_frame_count[idx] = 0
self._matched_idx.add(idx)
self._matched_ID.add(prev_id)
self._untracked_prev_idx.remove(bbox_pair["prev_idx"])
instances = self._assign_new_id(instances)
instances = self._merge_untracked_instances(instances)
self._prev_instances = copy.deepcopy(instances)
return instances
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
        pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
            A list of bbox pairs with their IoU values
"""
bbox_pairs = []
<fim_suffix>
return bbox_pairs
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _reset_fields(self):
"""
        Before each update call, reset fields first
"""
self._matched_idx = set()
self._matched_ID = set()
self._untracked_prev_idx = set(range(len(self._prev_instances)))
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
def _merge_untracked_instances(self, instances: Instances) -> Instances:
"""
For untracked previous instances, under certain condition, still keep them
in tracking and merge with the current instances.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances merging current instances and instances from previous
frame decided to keep tracking
"""
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
for idx in self._untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
) | for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
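The two assign_cost_matrix_values variants quoted in the snippets above differ only in the value written for valid pairs (-IoU versus a flat -1); a small sketch with made-up pairs showing how that changes the solver's preference, again assuming scipy is installed:

import numpy as np
from scipy.optimize import linear_sum_assignment

LARGE_COST_VALUE = 100000
bbox_pairs = [  # made-up candidates between 2 current detections and 2 previous tracks
    {"idx": 0, "prev_idx": 0, "IoU": 0.55},
    {"idx": 0, "prev_idx": 1, "IoU": 0.90},
    {"idx": 1, "prev_idx": 0, "IoU": 0.60},
    {"idx": 1, "prev_idx": 1, "IoU": 0.65},
]

def solve(iou_weighted: bool):
    cost_matrix = np.full((2, 2), LARGE_COST_VALUE, dtype=np.float64)
    for pair in bbox_pairs:
        # the IoU-weighted variant rewards high-IoU pairs; the vanilla variant treats all valid pairs alike
        cost_matrix[pair["idx"]][pair["prev_idx"]] = -pair["IoU"] if iou_weighted else -1
    rows, cols = linear_sum_assignment(cost_matrix)
    return list(zip(rows.tolist(), cols.tolist()))

print("IoU-weighted:", solve(True))   # maximizes total IoU -> [(0, 1), (1, 0)]
print("vanilla     :", solve(False))  # any full matching of valid pairs is equally cheap here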
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def _assign_new_id(self, instances: Instances) -> Instances:
"""
For each untracked instance, assign a new id
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with new ID assigned
"""
untracked_idx = set(range(len(instances))).difference(self._matched_idx)
for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0
return instances
# UniRef/detectron2/tracking/utils.py
def create_prediction_pairs(
instances: Instances,
prev_instances: Instances,
iou_all: np.ndarray,
threshold: float = 0.5,
) -> List:
"""
Args:
instances: predictions from current frame
prev_instances: predictions from previous frame
iou_all: 2D numpy array containing iou for each bbox pair
        threshold: pairs of bboxes with IoU below this threshold are not considered valid
Return:
List of bbox pairs
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(prev_instances)):
if iou_all[i, j] < threshold:
continue
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": prev_instances.ID_period[j],
}
)
return bbox_pairs
# UniRef/detectron2/tracking/bbox_iou_tracker.py
def _create_prediction_pairs(
self, instances: Instances, iou_all: np.ndarray
) -> List:
"""
For all instances in previous and current frames, create pairs. For each
        pair, store index of the instance in current frame predictions, index in
previous predictions, ID in previous predictions, IoU of the bboxes in this
pair, period in previous predictions.
Args:
instances: D2 Instances, for predictions of the current frame
iou_all: IoU for all bboxes pairs
Return:
            A list of bbox pairs with their IoU values
"""
bbox_pairs = []
for i in range(len(instances)):
for j in range(len(self._prev_instances)):
bbox_pairs.append(
{
"idx": i,
"prev_idx": j,
"prev_id": self._prev_instances.ID[j],
"IoU": iou_all[i, j],
"prev_period": self._prev_instances.ID_period[j],
}
)
return bbox_pairs
"""
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import copy
import numpy as np
import torch
from detectron2.structures import Boxes, Instances
from .base_tracker import BaseTracker
from scipy.optimize import linear_sum_assignment
from ..config.config import CfgNode as CfgNode_
from typing import Dict
from detectron2.config import configurable
class BaseHungarianTracker(BaseTracker):
"""
A base class for all Hungarian trackers
"""
@configurable
def __init__(
self,
video_height: int,
video_width: int,
max_num_instances: int = 200,
max_lost_frame_count: int = 0,
min_box_rel_dim: float = 0.02,
min_instance_period: int = 1,
**kwargs
):
"""
Args:
video_height: height of the video frame
video_width: width of the video frame
max_num_instances: maximum number of ids allowed to be tracked
max_lost_frame_count: maximum number of frames an id can stay lost;
once this number is exceeded, the id is considered
lost forever
min_box_rel_dim: a fraction of the frame size; a bbox smaller than
this relative dimension is removed from tracking
min_instance_period: an instance is only shown after it has persisted
for this number of frames since it first appears in the video
"""
super().__init__(**kwargs)
self._video_height = video_height
self._video_width = video_width
self._max_num_instances = max_num_instances
self._max_lost_frame_count = max_lost_frame_count
self._min_box_rel_dim = min_box_rel_dim
self._min_instance_period = min_instance_period
@classmethod
def from_config(cls, cfg: CfgNode_) -> Dict:
raise NotImplementedError("Calling HungarianTracker::from_config")
def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
raise NotImplementedError("Calling HungarianTracker::build_matrix")
def update(self, instances: Instances) -> Instances:
if instances.has("pred_keypoints"):
raise NotImplementedError("Need to add support for keypoints")
instances = self._initialize_extra_fields(instances)
if self._prev_instances is not None:
self._untracked_prev_idx = set(range(len(self._prev_instances)))
cost_matrix = self.build_cost_matrix(instances, self._prev_instances)
matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix)
instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx)
instances = self._process_unmatched_idx(instances, matched_idx)
instances = self._process_unmatched_prev_idx(instances, matched_prev_idx)
self._prev_instances = copy.deepcopy(instances)
return instances
def _initialize_extra_fields(self, instances: Instances) -> Instances:
"""
If input instances don't have ID, ID_period, lost_frame_count fields,
this method is used to initialize these fields.
Args:
instances: D2 Instances, for predictions of the current frame
Return:
D2 Instances with extra fields added
"""
if not instances.has("ID"):
instances.set("ID", [None] * len(instances))
if not instances.has("ID_period"):
instances.set("ID_period", [None] * len(instances))
if not instances.has("lost_frame_count"):
instances.set("lost_frame_count", [None] * len(instances))
if self._prev_instances is None:
instances.ID = list(range(len(instances)))
self._id_count += len(instances)
instances.ID_period = [1] * len(instances)
instances.lost_frame_count = [0] * len(instances)
return instances
def _process_matched_idx(
self,
instances: Instances,
matched_idx: np.ndarray,
matched_prev_idx: np.ndarray
) -> Instances:
assert matched_idx.size == matched_prev_idx.size
for i in range(matched_idx.size):
instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]]
instances.ID_period[matched_idx[i]] = \
self._prev_instances.ID_period[matched_prev_idx[i]] + 1
instances.lost_frame_count[matched_idx[i]] = 0
return instances
def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances:
untracked_idx = set(range(len(instances))).difference(set(matched_idx))
<fim_suffix>
return instances
def _process_unmatched_prev_idx(
self,
instances: Instances,
matched_prev_idx:
np.ndarray
) -> Instances:
untracked_instances = Instances(
image_size=instances.image_size,
pred_boxes=[],
pred_masks=[],
pred_classes=[],
scores=[],
ID=[],
ID_period=[],
lost_frame_count=[],
)
prev_bboxes = list(self._prev_instances.pred_boxes)
prev_classes = list(self._prev_instances.pred_classes)
prev_scores = list(self._prev_instances.scores)
prev_ID_period = self._prev_instances.ID_period
if instances.has("pred_masks"):
prev_masks = list(self._prev_instances.pred_masks)
untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx))
for idx in untracked_prev_idx:
x_left, y_top, x_right, y_bot = prev_bboxes[idx]
if (
(1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim)
or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)
or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count
or prev_ID_period[idx] <= self._min_instance_period
):
continue
untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
untracked_instances.pred_classes.append(int(prev_classes[idx]))
untracked_instances.scores.append(float(prev_scores[idx]))
untracked_instances.ID.append(self._prev_instances.ID[idx])
untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
untracked_instances.lost_frame_count.append(
self._prev_instances.lost_frame_count[idx] + 1
)
if instances.has("pred_masks"):
untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
if instances.has("pred_masks"):
untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
else:
untracked_instances.remove("pred_masks")
return Instances.cat(
[
instances,
untracked_instances,
]
)
<fim_middle>for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0 | for idx in untracked_idx:
instances.ID[idx] = self._id_count
self._id_count += 1
instances.ID_period[idx] = 1
instances.lost_frame_count[idx] = 0 | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
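BaseHungarianTracker leaves build_cost_matrix to subclasses and then solves the assignment with scipy.optimize.linear_sum_assignment; the matched (row, column) index pairs are what _process_matched_idx consumes. A small sketch with a made-up cost matrix (a cost could, for instance, be derived from 1 - IoU) is:

import numpy as np
from scipy.optimize import linear_sum_assignment

# Toy cost matrix: rows = current detections, columns = previous instances (lower is better).
cost = np.array([
    [0.1, 0.9, 0.8],
    [0.7, 0.2, 0.6],
])

matched_idx, matched_prev_idx = linear_sum_assignment(cost)
for r, c in zip(matched_idx, matched_prev_idx):
    print(f"current detection {r} <- previous instance {c} (cost={cost[r, c]:.2f})")
# current detection 0 <- previous instance 0 (cost=0.10)
# current detection 1 <- previous instance 1 (cost=0.20)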
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/external/lvos-evaluation/lvos/metrics.py
def _seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <dmartin@eecs.berkeley.edu>
January 2003
"""
seg = seg.astype(np.bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
assert not (
width > w | height > h | abs(ar1 - ar2) > 0.01
), "Can" "t convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
for y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1
return bmap
# UniRef/detectron2/utils/visualizer.py
def non_empty_mask(self):
"""
Returns:
(H, W) array, a mask for all pixels that have a prediction
"""
empty_ids = []
for id in self._seg_ids:
if id not in self._sinfo:
empty_ids.append(id)
if len(empty_ids) == 0:
return np.zeros(self._seg.shape, dtype=np.uint8)
assert (
len(empty_ids) == 1
), ">1 ids corresponds to no labels. This is currently not supported"
return (self._seg != empty_ids[0]).numpy().astype(np.bool)
# UniRef/detectron2/utils/colormap.py
def random_colors(N, rgb=False, maximum=255):
"""
Args:
N (int): number of unique colors needed
rgb (bool): whether to return RGB colors or BGR colors.
maximum (int): either 255 or 1
Returns:
ndarray: a list of random_color
"""
indices = random.sample(range(len(_COLORS)), N)
ret = [_COLORS[i] * maximum for i in indices]
if not rgb:
ret = [x[::-1] for x in ret]
return ret
"""
import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008):
assert annotation.shape == segmentation.shape
if void_pixels is not None:
assert annotation.shape == void_pixels.shape
if annotation.ndim == 3:
n_frames = annotation.shape[0]
f_res = np.zeros(n_frames)
for frame_id in range(n_frames):
void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ]
f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th)
elif annotation.ndim == 2:
f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th)
else:
raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions')
return f_res
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
Compute mean, recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
from skimage.morphology import disk
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F
def _seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <dmartin@eecs.berkeley.edu>
January 2003
"""
seg = seg.astype(np.bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
assert not (
width > w | height > h | abs(ar1 - ar2) > 0.01
), "Can" "t convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
<fim_suffix>
return bmap
if __name__ == '__main__':
from davis2017.davis import DAVIS
from davis2017.results import Results
dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics')
results = Results(root_dir='examples/osvos')
# Test timing F measure
for seq in dataset.get_sequences():
all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True)
all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
all_res_masks = results.read_masks(seq, all_masks_id)
f_metrics_res = np.zeros(all_gt_masks.shape[:2])
for ii in range(all_gt_masks.shape[0]):
f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...])
# Run using to profile code: python -m cProfile -o f_measure.prof metrics.py
# snakeviz f_measure.prof
<fim_middle>for y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1 | for y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1 | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
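db_eval_iou in this example is the Jaccard index, J = |A ∩ B| / |A ∪ B|, computed over non-void pixels. A tiny numpy illustration with toy masks (values assumed purely for demonstration):

import numpy as np

annotation = np.array([[0, 1, 1, 0],
                       [0, 1, 1, 0],
                       [0, 0, 0, 0]], dtype=bool)
segmentation = np.array([[0, 0, 1, 1],
                         [0, 0, 1, 1],
                         [0, 0, 0, 0]], dtype=bool)

intersection = np.sum(annotation & segmentation)  # 2 pixels agree
union = np.sum(annotation | segmentation)         # 6 pixels are covered by either mask
print(intersection / union)                       # 0.333... = Jaccard index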
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/config/lazy.py
def __call__(self, **kwargs):
if is_dataclass(self._target):
# omegaconf object cannot hold dataclass type
# https://github.com/omry/omegaconf/issues/784
target = _convert_target_to_string(self._target)
else:
target = self._target
kwargs["_target_"] = target
return DictConfig(content=kwargs, flags={"allow_objects": True})
# UniRef/detectron2/config/lazy.py
def __init__(self, target):
if not (callable(target) or isinstance(target, (str, abc.Mapping))):
raise TypeError(
f"target of LazyCall must be a callable or defines a callable! Got {target}"
)
self._target = target
# UniRef/detectron2/engine/launch.py
def _distributed_worker(
local_rank,
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout=DEFAULT_TIMEOUT,
):
assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
global_rank = machine_rank * num_gpus_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
timeout=timeout,
)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error("Process group URL: {}".format(dist_url))
raise e
# Setup the local process group (which contains ranks within the same machine)
assert comm._LOCAL_PROCESS_GROUP is None
num_machines = world_size // num_gpus_per_machine
for i in range(num_machines):
ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
pg = dist.new_group(ranks_on_i)
if i == machine_rank:
comm._LOCAL_PROCESS_GROUP = pg
assert num_gpus_per_machine <= torch.cuda.device_count()
torch.cuda.set_device(local_rank)
# synchronize is needed here to prevent a possible timeout after calling init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
main_func(*args)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
<fim_suffix>
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
return cfg # return as-is if don't know what to do
<fim_middle>from omegaconf import ListConfig | from omegaconf import ListConfig | IMPORT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
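instantiate() in this example recursively turns dict-like configs carrying a "_target_" key into objects. The stdlib-only sketch below captures that idea in simplified form; mini_instantiate and the datetime.timedelta target are illustrative assumptions, not the detectron2 implementation, which additionally handles omegaconf configs and error reporting.

import importlib

def mini_instantiate(cfg):
    # Recurse into lists and dicts, constructing an object wherever "_target_" appears.
    if isinstance(cfg, list):
        return [mini_instantiate(x) for x in cfg]
    if isinstance(cfg, dict) and "_target_" in cfg:
        kwargs = {k: mini_instantiate(v) for k, v in cfg.items() if k != "_target_"}
        module_name, _, attr = cfg["_target_"].rpartition(".")
        target = getattr(importlib.import_module(module_name), attr)
        return target(**kwargs)
    return cfg

cfg = {"_target_": "datetime.timedelta", "days": 2, "hours": 3}
print(mini_instantiate(cfg))  # 2 days, 3:00:00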
<filename>UniRef/detectron2/layers/roi_align.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/layers/roi_align_rotated.py
def __init__(self, output_size, spatial_scale, sampling_ratio):
"""
Args:
output_size (tuple): h, w
spatial_scale (float): scale the input boxes by this number
sampling_ratio (int): number of input samples to take for each output
sample. 0 to take samples densely.
Note:
ROIAlignRotated supports continuous coordinate by default:
Given a continuous coordinate c, its two neighboring pixel indices (in our
pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
from the underlying signal at continuous coordinates 0.5 and 1.5).
"""
super(ROIAlignRotated, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
# UniRef/detectron2/layers/nms.py
def batched_nms(
boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float
):
"""
Same as torchvision.ops.boxes.batched_nms, but with float().
"""
assert boxes.shape[-1] == 4
# Note: Torchvision already has a strategy (https://github.com/pytorch/vision/issues/1311)
# to decide whether to use coordinate trick or for loop to implement batched_nms. So we
# just call it directly.
# Fp16 does not have enough range for batched NMS, so adding float().
return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold)
# UniRef/detectron2/layers/mask_ops.py
def paste_mask_in_image_old(mask, box, img_h, img_w, threshold):
"""
Paste a single mask in an image.
This is a per-box implementation of :func:`paste_masks_in_image`.
This function has larger quantization error due to incorrect pixel
modeling and is not used any more.
Args:
mask (Tensor): A tensor of shape (Hmask, Wmask) storing the mask of a single
object instance. Values are in [0, 1].
box (Tensor): A tensor of shape (4, ) storing the x0, y0, x1, y1 box corners
of the object instance.
img_h, img_w (int): Image height and width.
threshold (float): Mask binarization threshold in [0, 1].
Returns:
im_mask (Tensor):
The resized and binarized object mask pasted into the original
image plane (a tensor of shape (img_h, img_w)).
"""
# Conversion from continuous box coordinates to discrete pixel coordinates
# via truncation (cast to int32). This determines which pixels to paste the
# mask onto.
box = box.to(dtype=torch.int32) # Continuous to discrete coordinate conversion
# An example (1D) box with continuous coordinates (x0=0.7, x1=4.3) will map to
# discrete coordinates (x0=0, x1=4). Note that box is mapped to 5 = x1 - x0 + 1
# pixels (not x1 - x0 pixels).
samples_w = box[2] - box[0] + 1 # Number of pixel samples, *not* geometric width
samples_h = box[3] - box[1] + 1 # Number of pixel samples, *not* geometric height
# Resample the mask from its original grid to the new samples_w x samples_h grid
mask = Image.fromarray(mask.cpu().numpy())
mask = mask.resize((samples_w, samples_h), resample=Image.BILINEAR)
mask = np.array(mask, copy=False)
if threshold >= 0:
mask = np.array(mask > threshold, dtype=np.uint8)
mask = torch.from_numpy(mask)
else:
# for visualization and debugging, we also
# allow it to return an unmodified mask
mask = torch.from_numpy(mask * 255).to(torch.uint8)
im_mask = torch.zeros((img_h, img_w), dtype=torch.uint8)
x_0 = max(box[0], 0)
x_1 = min(box[2] + 1, img_w)
y_0 = max(box[1], 0)
y_1 = min(box[3] + 1, img_h)
im_mask[y_0:y_1, x_0:x_1] = mask[
(y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])
]
return im_mask
"""
# Copyright (c) Facebook, Inc. and its affiliates.
from torch import nn
from torchvision.ops import roi_align
# NOTE: torchvision's RoIAlign has a different default aligned=False
class ROIAlign(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True):
"""
Args:
output_size (tuple): h, w
spatial_scale (float): scale the input boxes by this number
sampling_ratio (int): number of input samples to take for each output
sample. 0 to take samples densely.
aligned (bool): if False, use the legacy implementation in
Detectron. If True, align the results more perfectly.
Note:
The meaning of aligned=True:
Given a continuous coordinate c, its two neighboring pixel indices (in our
pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
from the underlying signal at continuous coordinates 0.5 and 1.5). But the original
roi_align (aligned=False) does not subtract the 0.5 when computing neighboring
pixel indices and therefore it uses pixels with a slightly incorrect alignment
(relative to our pixel model) when performing bilinear interpolation.
With `aligned=True`,
we first appropriately scale the ROI and then shift it by -0.5
prior to calling roi_align. This produces the correct neighbors; see
detectron2/tests/test_roi_align.py for verification.
This difference does not affect the model's performance if
ROIAlign is used together with conv layers.
"""
super().__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
self.aligned = aligned
<fim_suffix>
version = tuple(int(x) for x in __version__.split(".")[:2])
# https://github.com/pytorch/vision/pull/2438
assert version >= (0, 7), "Require torchvision >= 0.7"
def forward(self, input, rois):
"""
Args:
input: NCHW images
rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy.
"""
assert rois.dim() == 2 and rois.size(1) == 5
if input.is_quantized:
input = input.dequantize()
return roi_align(
input,
rois.to(dtype=input.dtype),
self.output_size,
self.spatial_scale,
self.sampling_ratio,
self.aligned,
)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ", aligned=" + str(self.aligned)
tmpstr += ")"
return tmpstr
<fim_middle>from torchvision import __version__ | from torchvision import __version__ | IMPORT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
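ROIAlign in this example is a thin wrapper around torchvision.ops.roi_align, forwarding the aligned flag discussed in its docstring. A minimal usage sketch follows; the feature map, box coordinates and sizes are toy assumptions.

import torch
from torchvision.ops import roi_align

features = torch.arange(64, dtype=torch.float32).reshape(1, 1, 8, 8)  # one 8x8 feature map
rois = torch.tensor([[0.0, 1.0, 1.0, 5.0, 5.0]])  # (batch_index, x0, y0, x1, y1)

out_aligned = roi_align(features, rois, output_size=(2, 2), spatial_scale=1.0,
                        sampling_ratio=0, aligned=True)
out_legacy = roi_align(features, rois, output_size=(2, 2), spatial_scale=1.0,
                       sampling_ratio=0, aligned=False)
print(out_aligned.shape)                        # torch.Size([1, 1, 2, 2])
print((out_aligned - out_legacy).abs().max())   # non-zero difference caused by the half-pixel shift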
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/external/lvos-evaluation/lvos/metrics.py
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
Compute mean, recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F
# UniRef/external/lvos-evaluation/lvos/metrics.py
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
# UniRef/external/lvos-evaluation/lvos/metrics.py
def _seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <dmartin@eecs.berkeley.edu>
January 2003
"""
seg = seg.astype(np.bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
assert not (
width > w | height > h | abs(ar1 - ar2) > 0.01
), "Can" "t convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
for y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1
return bmap
"""
import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(np.bool)
segmentation = segmentation.astype(np.bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008):
assert annotation.shape == segmentation.shape
if void_pixels is not None:
assert annotation.shape == void_pixels.shape
if annotation.ndim == 3:
n_frames = annotation.shape[0]
f_res = np.zeros(n_frames)
for frame_id in range(n_frames):
void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ]
f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th)
elif annotation.ndim == 2:
f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th)
else:
raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions')
return f_res
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
Compute mean, recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(np.bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(np.bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
<fim_suffix>
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F
def _seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <dmartin@eecs.berkeley.edu>
January 2003
"""
seg = seg.astype(np.bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
assert not (
width > w | height > h | abs(ar1 - ar2) > 0.01
), "Can" "t convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
for y in range(h):
if b[y, x]:
j = 1 + math.floor((y - 1) + height / h)
i = 1 + math.floor((x - 1) + width / h)
bmap[j, i] = 1
return bmap
if __name__ == '__main__':
from davis2017.davis import DAVIS
from davis2017.results import Results
dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics')
results = Results(root_dir='examples/osvos')
# Test timing F measure
for seq in dataset.get_sequences():
all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True)
all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
all_res_masks = results.read_masks(seq, all_masks_id)
f_metrics_res = np.zeros(all_gt_masks.shape[:2])
for ii in range(all_gt_masks.shape[0]):
f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...])
# Run using to profile code: python -m cProfile -o f_measure.prof metrics.py
# snakeviz f_measure.prof
<fim_middle>from skimage.morphology import disk | from skimage.morphology import disk | IMPORT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
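The closing precision/recall/F computation in f_measure is the usual harmonic mean, F = 2PR / (P + R). A worked example with made-up boundary-pixel counts:

n_fg, n_gt = 100, 80          # predicted / ground-truth boundary pixels (made-up counts)
fg_match, gt_match = 60, 56   # pixels of each boundary matched within the dilation tolerance

precision = fg_match / n_fg   # 0.60
recall = gt_match / n_gt      # 0.70
F = 2 * precision * recall / (precision + recall)
print(round(F, 4))            # 0.6462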
<filename>UniRef/detectron2/utils/registry.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/data/catalog.py
def get(self, name):
"""
Call the registered function and return its results.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
Returns:
list[dict]: dataset annotations.
"""
try:
f = self[name]
except KeyError as e:
raise KeyError(
"Dataset '{}' is not registered! Available datasets are: {}".format(
name, ", ".join(list(self.keys()))
)
) from e
return f()
# UniRef/detectron2/data/detection_utils.py
def _apply_exif_orientation(image):
"""
Applies the exif orientation correctly.
This code exists per the bug:
https://github.com/python-pillow/Pillow/issues/3973
with the function `ImageOps.exif_transpose`. The Pillow source raises errors with
various methods, especially `tobytes`
Function based on:
https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527
Args:
image (PIL.Image): a PIL image
Returns:
(PIL.Image): the PIL image with exif orientation applied, if applicable
"""
if not hasattr(image, "getexif"):
return image
try:
exif = image.getexif()
except Exception: # https://github.com/facebookresearch/detectron2/issues/1885
exif = None
if exif is None:
return image
orientation = exif.get(_EXIF_ORIENT)
method = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
return image.transpose(method)
return image
# UniRef/detectron2/structures/instances.py
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances":
"""
Args:
item: an index-like object and will be used to index all the fields.
Returns:
If `item` is a string, return the data in the corresponding field.
Otherwise, returns an `Instances` where all fields are indexed by `item`.
"""
if type(item) == int:
if item >= len(self) or item < -len(self):
raise IndexError("Instances index out of range!")
else:
item = slice(item, None, len(self))
ret = Instances(self._image_size)
for k, v in self._fields.items():
ret.set(k, v[item])
return ret
"""
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any
import pydoc
from fvcore.common.registry import Registry # for backward compatibility.
"""
``Registry`` and `locate` provide ways to map a string (typically found
in config files) to callable objects.
"""
__all__ = ["Registry", "locate"]
def _convert_target_to_string(t: Any) -> str:
"""
Inverse of ``locate()``.
Args:
t: any object with ``__module__`` and ``__qualname__``
"""
module, qualname = t.__module__, t.__qualname__
# Compress the path to this object, e.g. ``module.submodule._impl.class``
# may become ``module.submodule.class``, if the latter also resolves to the same
# object. This simplifies the string, and also is less affected by moving the
# class implementation.
module_parts = module.split(".")
for k in range(1, len(module_parts)):
prefix = ".".join(module_parts[:k])
candidate = f"{prefix}.{qualname}"
try:
if locate(candidate) is t:
return candidate
except ImportError:
pass
return f"{module}.{qualname}"
def locate(name: str) -> Any:
"""
Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``,
such as "module.submodule.class_name".
Raise Exception if it cannot be found.
"""
obj = pydoc.locate(name)
# Some cases (e.g. torch.optim.sgd.SGD) not handled correctly
# by pydoc.locate. Try a private function from hydra.
if obj is None:
try:
# from hydra.utils import get_method - will print many errors
<fim_suffix>
except ImportError as e:
raise ImportError(f"Cannot dynamically locate object {name}!") from e
else:
obj = _locate(name) # it raises if fails
return obj
<fim_middle>from hydra.utils import _locate | from hydra.utils import _locate | IMPORT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
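locate() in this example first tries pydoc.locate on the dotted path and only falls back to hydra's private _locate helper when that fails. A tiny stdlib illustration of the dotted-path lookup (the chosen path is just an example):

import pydoc
from collections import OrderedDict

obj = pydoc.locate("collections.OrderedDict")     # resolve "module.qualname" to the object
print(obj is OrderedDict)                         # True
print(pydoc.locate("collections.DoesNotExist"))   # None when the path cannot be resolved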
<filename>UniRef/detectron2/config/config.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/config/compat.py
def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN:
"""
Upgrade a config from its current version to a newer version.
Args:
cfg (CfgNode):
to_version (int): defaults to the latest version.
"""
cfg = cfg.clone()
if to_version is None:
to_version = _C.VERSION
assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format(
cfg.VERSION, to_version
)
for k in range(cfg.VERSION, to_version):
converter = globals()["ConverterV" + str(k + 1)]
converter.upgrade(cfg)
cfg.VERSION = k + 1
return cfg
# UniRef/demo/demo.py
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
# To use demo for Panoptic-DeepLab, please uncomment the following two lines.
# from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config # noqa
# add_panoptic_deeplab_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.freeze()
return cfg
# UniRef/detectron2/model_zoo/model_zoo.py
def get(config_path, trained: bool = False, device: Optional[str] = None):
"""
Get a model specified by relative path under Detectron2's official ``configs/`` directory.
Args:
config_path (str): config file name relative to detectron2's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
trained (bool): see :func:`get_config`.
device (str or None): overwrite the device in config, if given.
Returns:
nn.Module: a detectron2 model. Will be in training mode.
Example:
::
from detectron2 import model_zoo
model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True)
"""
cfg = get_config(config_path, trained)
if device is None and not torch.cuda.is_available():
device = "cpu"
if device is not None and isinstance(cfg, CfgNode):
cfg.MODEL.DEVICE = device
if isinstance(cfg, CfgNode):
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
else:
model = instantiate(cfg.model)
if device is not None:
model = model.to(device)
if "train" in cfg and "init_checkpoint" in cfg.train:
DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
return model
"""
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
"""
Load content from the given config file and merge it into self.
Args:
cfg_filename: config filename
allow_unsafe: allow unsafe yaml syntax
"""
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
<fim_suffix>
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
@configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
else:
if from_config is None:
return configurable # @configurable() is made equivalent to @configurable
assert inspect.isfunction(
from_config
), "from_config argument of configurable must be a function!"
def wrapper(orig_func):
@functools.wraps(orig_func)
def wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs)
wrapped.from_config = from_config
return wrapped
return wrapper
def _get_args_from_config(from_config_func, *args, **kwargs):
"""
Use `from_config` to obtain explicit arguments.
Returns:
dict: arguments to be used for cls.__init__
"""
signature = inspect.signature(from_config_func)
if list(signature.parameters.keys())[0] != "cfg":
if inspect.isfunction(from_config_func):
name = from_config_func.__name__
else:
name = f"{from_config_func.__self__}.from_config"
raise TypeError(f"{name} must take 'cfg' as the first argument!")
support_var_arg = any(
param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
for param in signature.parameters.values()
)
if support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs)
return ret
def _called_with_cfg(*args, **kwargs):
"""
Returns:
bool: whether the arguments contain CfgNode and should be considered
forwarded to from_config.
"""
from omegaconf import DictConfig
if len(args) and isinstance(args[0], (_CfgNode, DictConfig)):
return True
if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)):
return True
# `from_config`'s first argument is forced to be "cfg".
# So the above check covers all cases.
return False
<fim_middle>from .defaults import _C | from .defaults import _C | IMPORT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/utils/logger.py
def log_every_n_seconds(lvl, msg, n=1, *, name=None):
"""
Log no more than once per n seconds.
Args:
lvl (int): the logging level
msg (str):
n (int):
name (str): name of the logger to use. Will use the caller's module by default.
"""
caller_module, key = _find_caller()
last_logged = _LOG_TIMER.get(key, None)
current_time = time.time()
if last_logged is None or current_time - last_logged >= n:
logging.getLogger(name or caller_module).log(lvl, msg)
_LOG_TIMER[key] = current_time
# UniRef/detectron2/evaluation/coco_evaluation.py
def _derive_refcoco_results(self, coco_eval, iou_type):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
coco_eval (None or COCOEval): None represents no predictions from model.
iou_type (str):
class_names (None or list[str]): if provided, will use it to predict
per-category AP.
Returns:
a dict of {metric name: score}
"""
metrics = {"bbox": ["P@0.5", "P@0.6", "P@0.7", "P@0.8", "P@0.9", "oIoU", "mIoU"],
"segm": ["P@0.5", "P@0.6", "P@0.7", "P@0.8", "P@0.9", "oIoU", "mIoU"]
}[iou_type]
if coco_eval is None:
self._logger.warn("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float("nan")
for idx, metric in enumerate(metrics)
}
ious = np.array([v for (k, v) in coco_eval.ious.items()])
total_intersection_area = coco_eval.total_intersection_area
total_union_area = coco_eval.total_union_area
iou_list = coco_eval.iou_list
# compute metrics
results["P@0.5"] = np.sum(ious > 0.5) / len(ious) * 100
results["P@0.6"] = np.sum(ious > 0.6) / len(ious) * 100
results["P@0.7"] = np.sum(ious > 0.7) / len(ious) * 100
results["P@0.8"] = np.sum(ious > 0.8) / len(ious) * 100
results["P@0.9"] = np.sum(ious > 0.9) / len(ious) * 100
results["oIoU"] = total_intersection_area / total_union_area * 100
results["mIoU"] = np.mean(ious) * 100
# if iou_type == "bbox":
# results["P@0.5"] = np.sum(ious > 0.5) / len(ious) * 100
# elif iou_type == "segm":
# results["mIoU"] = np.mean(ious) * 100
# else:
# raise ValueError("Unsupported iou_type!")
self._logger.info(
"Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
)
# results.update({"AP-" + name: ap for name, ap in results_per_category})
return results
# UniRef/detectron2/data/build.py
def print_instances_class_histogram(dataset_dicts, class_names):
"""
Args:
dataset_dicts (list[dict]): list of dataset dicts.
class_names (list[str]): list of class names (zero-indexed).
"""
num_classes = len(class_names)
hist_bins = np.arange(num_classes + 1)
histogram = np.zeros((num_classes,), dtype=np.int)
for entry in dataset_dicts:
annos = entry["annotations"]
classes = np.asarray(
[x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=np.int
)
if len(classes):
assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}"
assert (
classes.max() < num_classes
), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes"
histogram += np.histogram(classes, bins=hist_bins)[0]
N_COLS = min(6, len(class_names) * 2)
def short_name(x):
# make long class names shorter. useful for lvis
if len(x) > 13:
return x[:11] + ".."
return x
data = list(
itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])
)
total_num_instances = sum(data[1::2])
data.extend([None] * (N_COLS - (len(data) % N_COLS)))
if num_classes > 1:
data.extend(["total", total_num_instances])
data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
data,
headers=["category", "#instances"] * (N_COLS // 2),
tablefmt="pipe",
numalign="left",
stralign="center",
)
log_first_n(
logging.INFO,
"Distribution of instances among all {} categories:\n".format(num_classes)
+ colored(table, "cyan"),
key="message",
)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
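# In the Caffe2 layout the background class comes first: bbox_pred stores 4 box-delta rows
# per class and the mask predictor stores 1 row per class, so slicing from index 4 (or 1)
# drops exactly the background rows.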
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
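# Equivalent to rolling the first row to the end: [bg, c1, ..., cK] -> [c1, ..., cK, bg].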
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note the current matching is not symmetric.
# it assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state-dict, and returns a new chkpt_state_dict with names
converted to match model_state_dict with heuristics. The returned dict can be later
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
<fim_suffix>
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
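# match_matrix[i, j] is len(ckpt_keys[j]) when checkpoint key j is a valid suffix of model key i, and 0 otherwise.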
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_keys: matched checkpoint key --> matched model key
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
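# e.g. "backbone.res2.conv1.weight" -> "backbone.res2.conv1." (the trailing dot is kept)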
return prefix
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
m1, m2 = min(names), max(names)
ret = [a for a, b in zip(m1, m2) if a == b]
ret = ".".join(ret) + "." if len(ret) else ""
return ret
def _longest_common_prefix_str(names: List[str]) -> str:
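# Character-level LCP. The LCP of all names equals the LCP of the lexicographic min and max,
# e.g. ["conv1_b", "conv1_w"] -> "conv1_".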
m1, m2 = min(names), max(names)
lcp = [a for a, b in zip(m1, m2) if a == b]
lcp = "".join(lcp)
return lcp
def _group_str(names: List[str]) -> str:
"""
Turn "common1", "common2", "common3" into "common{1,2,3}"
"""
lcp = _longest_common_prefix_str(names)
rest = [x[len(lcp) :] for x in names]
rest = "{" + ",".join(rest) + "}"
ret = lcp + rest
# add some simplification for BN specifically
ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
return ret
<fim_middle>def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b) | def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b) | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/structures/masks.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/utils/visualizer.py
def overlay_instances(
self,
*,
boxes=None,
labels=None,
masks=None,
keypoints=None,
assigned_colors=None,
alpha=0.5,
):
"""
Args:
boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
or a :class:`RotatedBoxes`,
or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image,
labels (list[str]): the text to be displayed for each instance.
masks (masks-like object): Supported types are:
* :class:`detectron2.structures.PolygonMasks`,
:class:`detectron2.structures.BitMasks`.
* list[list[ndarray]]: contains the segmentation masks for all objects in one image.
The first level of the list corresponds to individual instances. The second
level to all the polygon that compose the instance, and the third level
to the polygon coordinates. The third level should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
* list[ndarray]: each ndarray is a binary mask of shape (H, W).
* list[dict]: each dict is a COCO-style RLE.
keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
where the N is the number of instances and K is the number of keypoints.
The last dimension corresponds to (x, y, visibility or score).
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = 0
if boxes is not None:
boxes = self._convert_boxes(boxes)
num_instances = len(boxes)
if masks is not None:
masks = self._convert_masks(masks)
if num_instances:
assert len(masks) == num_instances
else:
num_instances = len(masks)
if keypoints is not None:
if num_instances:
assert len(keypoints) == num_instances
else:
num_instances = len(keypoints)
keypoints = self._convert_keypoints(keypoints)
if labels is not None:
assert len(labels) == num_instances
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
if boxes is not None and boxes.shape[1] == 5:
return self.overlay_rotated_instances(
boxes=boxes, labels=labels, assigned_colors=assigned_colors
)
# Display in largest to smallest order to reduce occlusion.
areas = None
if boxes is not None:
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
elif masks is not None:
areas = np.asarray([x.area() for x in masks])
if areas is not None:
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs] if boxes is not None else None
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
keypoints = keypoints[sorted_idxs] if keypoints is not None else None
for i in range(num_instances):
color = assigned_colors[i]
if boxes is not None:
self.draw_box(boxes[i], edge_color=color)
if masks is not None:
for segment in masks[i].polygons:
self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
if labels is not None:
# first get a box
if boxes is not None:
x0, y0, x1, y1 = boxes[i]
text_pos = (x0, y0) # if drawing boxes, put text on the box corner.
horiz_align = "left"
elif masks is not None:
# skip small mask without polygon
if len(masks[i].polygons) == 0:
continue
x0, y0, x1, y1 = masks[i].bbox()
# draw text in the center (defined by median) when box is not drawn
# median is less sensitive to outliers.
text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
horiz_align = "center"
else:
continue # drawing the box confidence for keypoints isn't very useful.
# for small objects, draw text at the side to avoid occlusion
instance_area = (y1 - y0) * (x1 - x0)
if (
instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
or y1 - y0 < 40 * self.output.scale
):
if y1 >= self.output.height - 5:
text_pos = (x1, y0)
else:
text_pos = (x0, y1)
height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
font_size = (
np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
* 0.5
* self._default_font_size
)
self.draw_text(
labels[i],
text_pos,
color=lighter_color,
horizontal_alignment=horiz_align,
font_size=font_size,
)
# draw keypoints
if keypoints is not None:
for keypoints_per_instance in keypoints:
self.draw_and_connect_keypoints(keypoints_per_instance)
return self.output
# UniRef/detectron2/utils/visualizer.py
def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
"""
Args:
img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
the height and width of the image respectively. C is the number of
color channels. The image is required to be in RGB format since that
is a requirement of the Matplotlib library. The image is also expected
to be in the range [0, 255].
metadata (Metadata): dataset metadata (e.g. class names and colors)
instance_mode (ColorMode): defines one of the pre-defined style for drawing
instances on an image.
"""
self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
if metadata is None:
metadata = MetadataCatalog.get("__nonexist__")
self.metadata = metadata
self.output = VisImage(self.img, scale=scale)
self.cpu_device = torch.device("cpu")
# too small text is useless, therefore clamp the default font size from below
self._default_font_size = max(
np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
)
self._instance_mode = instance_mode
self.keypoint_threshold = _KEYPOINT_THRESHOLD
# UniRef/detectron2/layers/mask_ops.py
def paste_masks_in_image(
masks: torch.Tensor, boxes: torch.Tensor, image_shape: Tuple[int, int], threshold: float = 0.5
):
"""
Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image.
The location, height, and width for pasting each mask is determined by their
corresponding bounding boxes in boxes.
Note:
This is a complicated but more accurate implementation. In actual deployment, it is
often enough to use a faster but less accurate implementation.
See :func:`paste_mask_in_image_old` in this file for an alternative implementation.
Args:
masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of
detected object instances in the image and Hmask, Wmask are the mask height and mask
width of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1].
boxes (Boxes or Tensor): A Boxes of length Bimg or Tensor of shape (Bimg, 4).
boxes[i] and masks[i] correspond to the same object instance.
image_shape (tuple): height, width
threshold (float): A threshold in [0, 1] for converting the (soft) masks to
binary masks.
Returns:
img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the
number of detected object instances and Himage, Wimage are the image height
and width. img_masks[i] is a binary mask for object instance i.
"""
assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported"
N = len(masks)
if N == 0:
return masks.new_empty((0,) + image_shape, dtype=torch.uint8)
if not isinstance(boxes, torch.Tensor):
boxes = boxes.tensor
device = boxes.device
assert len(boxes) == N, boxes.shape
img_h, img_w = image_shape
# The actual implementation split the input into chunks,
# and paste them chunk by chunk.
if device.type == "cpu" or torch.jit.is_scripting():
# CPU is most efficient when they are pasted one by one with skip_empty=True
# so that it performs minimal number of operations.
num_chunks = N
else:
# GPU benefits from parallelism for larger chunks, but may have memory issue
# int(img_h) because shape may be tensors in tracing
num_chunks = int(np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
assert (
num_chunks <= N
), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it"
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
img_masks = torch.zeros(
N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8
)
for inds in chunks:
masks_chunk, spatial_inds = _do_paste_mask(
masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu"
)
if threshold >= 0:
masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
else:
# for visualization and debugging
masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
if torch.jit.is_scripting(): # Scripting does not use the optimized codepath
img_masks[inds] = masks_chunk
else:
img_masks[(inds,) + spatial_inds] = masks_chunk
return img_masks
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
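# Hypothetical sanity check (not from the original source): the unit square
# x = [0, 1, 1, 0], y = [0, 0, 1, 1] gives an area of 1.0.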
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(bool)  # np.bool is removed in NumPy >= 1.24
rles = mask_util.frPyObjects(polygons, height, width)
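# frPyObjects encodes every polygon as a COCO RLE; merge (non-intersect mode by default) takes their union.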
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usages are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However, we observe no difference in accuracy,
and BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
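# Binarize the interpolated crop: keep pixels whose resampled foreground value is at least 0.5.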
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, its bounding box will be all zero.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
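# x_any has shape (N, W) and marks image columns containing any foreground pixel;
# y_any has shape (N, H) and marks such rows.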
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list corresponds to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
<fim_suffix>
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around polygon masks.
"""
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
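# Polygon coordinates are non-negative image coordinates, so zeros is a safe start for the running maximum.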
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy
return Boxes(boxes)
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor:
a BoolTensor which represents whether each mask is empty (False) or not (True).
"""
keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
return torch.from_numpy(np.asarray(keep, dtype=bool))
def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
"""
Support indexing over the instances and return a `PolygonMasks` object.
`item` can be:
1. An integer. It will return an object with only one instance.
2. A slice. It will return an object with the selected instances.
3. A list[int]. It will return an object with the selected instances,
corresponding to the indices in the list.
4. A vector mask of type BoolTensor, whose length is num_instances.
It will return an object with the instances whose mask is nonzero.
"""
if isinstance(item, int):
selected_polygons = [self.polygons[item]]
elif isinstance(item, slice):
selected_polygons = self.polygons[item]
elif isinstance(item, list):
selected_polygons = [self.polygons[i] for i in item]
elif isinstance(item, torch.Tensor):
# Polygons is a list, so we have to move the indices back to CPU.
if item.dtype == torch.bool:
assert item.dim() == 1, item.shape
item = item.nonzero().squeeze(1).cpu().numpy().tolist()
elif item.dtype in [torch.int32, torch.int64]:
item = item.cpu().numpy().tolist()
else:
raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
selected_polygons = [self.polygons[i] for i in item]
return PolygonMasks(selected_polygons)
def __iter__(self) -> Iterator[List[np.ndarray]]:
"""
Yields:
list[ndarray]: the polygons for one instance.
Each ndarray is a float64 vector representing a polygon.
"""
return iter(self.polygons)
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.polygons))
return s
def __len__(self) -> int:
return len(self.polygons)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each mask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor: A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = boxes.device
# Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
# (several small tensors for representing a single instance mask)
boxes = boxes.to(torch.device("cpu"))
results = [
rasterize_polygons_within_box(poly, box.numpy(), mask_size)
for poly, box in zip(self.polygons, boxes)
]
"""
poly: list[list[float]], the polygons for one instance
box: a tensor of shape (4,)
"""
if len(results) == 0:
return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
return torch.stack(results, dim=0).to(device=device)
def area(self):
"""
Computes area of the mask.
Only works with Polygons, using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Returns:
Tensor: a vector, area for each instance
"""
area = []
for polygons_per_instance in self.polygons:
area_per_instance = 0
for p in polygons_per_instance:
area_per_instance += polygon_area(p[0::2], p[1::2])
area.append(area_per_instance)
return torch.tensor(area)
@staticmethod
def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
"""
Concatenates a list of PolygonMasks into a single PolygonMasks
Arguments:
polymasks_list (list[PolygonMasks])
Returns:
PolygonMasks: the concatenated PolygonMasks
"""
assert isinstance(polymasks_list, (list, tuple))
assert len(polymasks_list) > 0
assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
cat_polymasks = type(polymasks_list[0])(
list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
)
return cat_polymasks
class ROIMasks:
"""
Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
full-image bitmask can be obtained by "pasting" the mask on the region defined
by the corresponding ROI box.
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor: (N, M, M) mask tensor that defines the mask within each ROI.
"""
if tensor.dim() != 3:
raise ValueError("ROIMasks must take a masks of 3 dimension.")
self.tensor = tensor
def to(self, device: torch.device) -> "ROIMasks":
return ROIMasks(self.tensor.to(device))
@property
def device(self) -> device:
return self.tensor.device
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, item) -> "ROIMasks":
"""
Returns:
ROIMasks: Create a new :class:`ROIMasks` by indexing.
The following usages are allowed:
1. `new_masks = masks[2:10]`: return a slice of masks.
2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
t = self.tensor[item]
if t.dim() != 3:
raise ValueError(
f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
)
return ROIMasks(t)
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
@torch.jit.unused
def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
"""
Args: see documentation of :func:`paste_masks_in_image`.
"""
from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape
if torch.jit.is_tracing():
if isinstance(height, torch.Tensor):
paste_func = _paste_masks_tensor_shape
else:
paste_func = paste_masks_in_image
else:
paste_func = retry_if_cuda_oom(paste_masks_in_image)
bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold)
return BitMasks(bitmasks)
<fim_middle>def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance | def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/export/caffe2_export.py
def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]):
"""
Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX.
Args:
model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py
tensor_inputs: a list of tensors that caffe2 model takes as input.
"""
model = copy.deepcopy(model)
assert isinstance(model, torch.nn.Module)
assert hasattr(model, "encode_additional_info")
# Export via ONNX
logger.info(
"Exporting a {} model via ONNX ...".format(type(model).__name__)
+ " Some warnings from ONNX are expected and are usually not to worry about."
)
onnx_model = export_onnx_model(model, (tensor_inputs,))
# Convert ONNX model to Caffe2 protobuf
init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
ops_table = [[op.type, op.input, op.output] for op in predict_net.op]
table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe")
logger.info(
"ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan")
)
# Apply protobuf optimization
fuse_alias_placeholder(predict_net, init_net)
if any(t.device.type != "cpu" for t in tensor_inputs):
fuse_copy_between_cpu_and_gpu(predict_net)
remove_dead_end_ops(init_net)
_assign_device_option(predict_net, init_net, tensor_inputs)
params, device_options = get_params_from_init_net(init_net)
predict_net, params = remove_reshape_for_fc(predict_net, params)
init_net = construct_init_net_from_params(params, device_options)
group_norm_replace_aten_with_caffe2(predict_net)
# Record necessary information for running the pb model in Detectron2 system.
model.encode_additional_info(predict_net, init_net)
logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net)))
logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net)))
return predict_net, init_net
# UniRef/projects/UniRef/uniref/models/segment_anything/modeling/sam.py
def postprocess_masks(
self,
masks: torch.Tensor,
input_size: Tuple[int, ...],
original_size: Tuple[int, ...],
) -> torch.Tensor:
"""
Remove padding and upscale masks to the original image size.
Arguments:
masks (torch.Tensor): Batched masks from the mask_decoder,
in BxCxHxW format.
input_size (tuple(int, int)): The size of the image input to the
model, in (H, W) format. Used to remove padding.
original_size (tuple(int, int)): The original size of the image
before resizing for input to the model, in (H, W) format.
Returns:
(torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
is given by original_size.
"""
dtype = masks.dtype
masks = F.interpolate(
masks.float(),
(self.image_encoder.img_size, self.image_encoder.img_size),
mode="bilinear",
align_corners=False,
)
# masks = masks.to(dtype)
masks = masks[..., : input_size[0], : input_size[1]]
masks = F.interpolate(
masks, original_size, mode="bilinear", align_corners=False
)
return masks
# UniRef/detectron2/data/build.py
def load_proposals_into_dataset(dataset_dicts, proposal_file):
"""
Load precomputed object proposals into the dataset.
The proposal file should be a pickled dict with the following keys:
- "ids": list[int] or list[str], the image ids
- "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
- "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
corresponding to the boxes.
- "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
proposal_file (str): file path of pre-computed proposals, in pkl format.
Returns:
list[dict]: the same format as dataset_dicts, but added proposal field.
"""
logger = logging.getLogger(__name__)
logger.info("Loading proposals from: {}".format(proposal_file))
with PathManager.open(proposal_file, "rb") as f:
proposals = pickle.load(f, encoding="latin1")
# Rename the key names in D1 proposal files
rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
for key in rename_keys:
if key in proposals:
proposals[rename_keys[key]] = proposals.pop(key)
# Fetch the indexes of all proposals that are in the dataset
# Convert image_id to str since they could be int.
img_ids = set({str(record["image_id"]) for record in dataset_dicts})
id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids}
# Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS'
bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS
for record in dataset_dicts:
# Get the index of the proposal
i = id_to_index[str(record["image_id"])]
boxes = proposals["boxes"][i]
objectness_logits = proposals["objectness_logits"][i]
# Sort the proposals in descending order of the scores
inds = objectness_logits.argsort()[::-1]
record["proposal_boxes"] = boxes[inds]
record["proposal_objectness_logits"] = objectness_logits[inds]
record["proposal_bbox_mode"] = bbox_mode
return dataset_dicts
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
<fim_suffix>
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
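# Equivalent to rolling the first row to the end: [bg, c1, ..., cK] -> [c1, ..., cK, bg].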
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note the current matching is not symmetric.
# it assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state-dict, and returns a new chkpt_state_dict with names
converted to match model_state_dict with heuristics. The returned dict can be later
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
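# match_matrix[i, j] is len(ckpt_keys[j]) when checkpoint key j is a valid suffix of model key i, and 0 otherwise.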
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_keys: matched checkpoint key --> matched model key
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
m1, m2 = min(names), max(names)
ret = [a for a, b in zip(m1, m2) if a == b]
ret = ".".join(ret) + "." if len(ret) else ""
return ret
def _longest_common_prefix_str(names: List[str]) -> str:
m1, m2 = min(names), max(names)
lcp = [a for a, b in zip(m1, m2) if a == b]
lcp = "".join(lcp)
return lcp
def _group_str(names: List[str]) -> str:
"""
Turn "common1", "common2", "common3" into "common{1,2,3}"
"""
lcp = _longest_common_prefix_str(names)
rest = [x[len(lcp) :] for x in names]
rest = "{" + ",".join(rest) + "}"
ret = lcp + rest
# add some simplification for BN specifically
ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
return ret
<fim_middle>def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name | def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
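Editor's illustration (not part of the dataset row above or of detectron2): the row exercises the checkpoint-loading code whose core rule is that a model key is paired with the checkpoint key forming its longest complete dot-separated suffix. The sketch below restates only that rule with made-up parameter names, as a minimal, dependency-free reference for reading the matching code.

def _toy_suffix_match(model_keys, ckpt_keys):
    """Return {model_key: ckpt_key} using the longest complete-suffix rule."""
    result = {}
    for mk in model_keys:
        # a checkpoint key matches if it equals the model key or is a '.'-delimited suffix of it
        candidates = [ck for ck in ckpt_keys if mk == ck or mk.endswith("." + ck)]
        if candidates:
            # prefer the longest checkpoint name, mirroring match_matrix.max(1) in the row above
            result[mk] = max(candidates, key=len)
    return result

if __name__ == "__main__":
    model_keys = ["backbone.body.res2.conv1.weight", "backbone.body.conv1.weight"]
    ckpt_keys = ["res2.conv1.weight", "conv1.weight"]
    # expected: {'backbone.body.res2.conv1.weight': 'res2.conv1.weight',
    #            'backbone.body.conv1.weight': 'conv1.weight'}
    print(_toy_suffix_match(model_keys, ckpt_keys))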
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/utils/events.py
def latest(self):
"""
Returns:
dict[str -> (float, int)]: mapping from the name of each scalar to the most
recent value and the iteration number its added.
"""
return self._latest_scalars
# UniRef/detectron2/structures/instances.py
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances":
"""
Args:
item: an index-like object and will be used to index all the fields.
Returns:
If `item` is a string, return the data in the corresponding field.
Otherwise, returns an `Instances` where all fields are indexed by `item`.
"""
if type(item) == int:
if item >= len(self) or item < -len(self):
raise IndexError("Instances index out of range!")
else:
item = slice(item, None, len(self))
ret = Instances(self._image_size)
for k, v in self._fields.items():
ret.set(k, v[item])
return ret
# UniRef/projects/UniRef/uniref/models/deformable_detr/backbone.py
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in sorted(xs.items()):
out.append(x)
# position encoding
for x in out:
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note the current matching is not symmetric.
# it assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
 Match names between the two state dicts, and return a new ckpt_state_dict with names
 converted to match model_state_dict using heuristics. The returned dict can later be
 loaded with the fvcore checkpointer.
 If `c2_conversion==True`, `ckpt_state_dict` is assumed to come from a Caffe2
 model and will be renamed first.
Strategy: suppose that the models that we will create will have prefixes appended
 to each of their keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
 To do so, for each model weight we look among all loaded keys for one that is a
 suffix of the current weight name, and use it if that is the case.
 If multiple matches exist, we take the one with the longest matching name.
 For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
 # get a matrix of string matches, where each (i, j) entry corresponds to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
<fim_suffix>
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
m1, m2 = min(names), max(names)
ret = [a for a, b in zip(m1, m2) if a == b]
ret = ".".join(ret) + "." if len(ret) else ""
return ret
def _longest_common_prefix_str(names: List[str]) -> str:
m1, m2 = min(names), max(names)
lcp = [a for a, b in zip(m1, m2) if a == b]
lcp = "".join(lcp)
return lcp
def _group_str(names: List[str]) -> str:
"""
Turn "common1", "common2", "common3" into "common{1,2,3}"
"""
lcp = _longest_common_prefix_str(names)
rest = [x[len(lcp) :] for x in names]
rest = "{" + ",".join(rest) + "}"
ret = lcp + rest
# add some simplification for BN specifically
ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
return ret
<fim_middle>def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix | def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
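Editor's illustration (not code from the repository): the row above fills in _submodule_name, and the surrounding logging relies on _longest_common_prefix and _group_str purely to pretty-print the match table. The self-contained restatement below uses hypothetical parameter names so the expected log formatting is easier to picture.

def lcp_dotted(names):
    """Longest common dot-separated prefix, e.g. ["abc.zfg", "abc.zef"] -> "abc."."""
    parts = [n.split(".") for n in names]
    lo, hi = min(parts), max(parts)
    common = [a for a, b in zip(lo, hi) if a == b]
    return ".".join(common) + "." if common else ""

def group_str(names):
    """Compact a family of names, e.g. ["fc1000_w", "fc1000_b"] -> "fc1000_{w,b}"."""
    lo, hi = min(names), max(names)
    lcp = "".join(a for a, b in zip(lo, hi) if a == b)
    return lcp + "{" + ",".join(n[len(lcp):] for n in names) + "}"

if __name__ == "__main__":
    print(lcp_dotted(["backbone.res2.conv1.weight", "backbone.res2.conv1.norm.bias"]))
    # -> backbone.res2.conv1.
    print(group_str(["fc1000_w", "fc1000_b"]))
    # -> fc1000_{w,b}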
<filename>UniRef/detectron2/config/config.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/data/transforms/augmentation.py
def _get_aug_input_args(aug, aug_input) -> List[Any]:
"""
Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``.
"""
if aug.input_args is None:
# Decide what attributes are needed automatically
prms = list(inspect.signature(aug.get_transform).parameters.items())
 # The default behavior is: if there is one parameter, then it is "image"
 # (this works automatically for the majority of use cases and also avoids BC breaking),
# Otherwise, use the argument names.
if len(prms) == 1:
names = ("image",)
else:
names = []
for name, prm in prms:
if prm.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
raise TypeError(
f""" \
The default implementation of `{type(aug)}.__call__` does not allow \
`{type(aug)}.get_transform` to use variable-length arguments (*args, **kwargs)! \
If arguments are unknown, reimplement `__call__` instead. \
"""
)
names.append(name)
aug.input_args = tuple(names)
args = []
for f in aug.input_args:
try:
args.append(getattr(aug_input, f))
except AttributeError as e:
raise AttributeError(
f"{type(aug)}.get_transform needs input attribute '{f}', "
f"but it is not an attribute of {type(aug_input)}!"
) from e
return args
# UniRef/detectron2/config/instantiate.py
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
return cfg # return as-is if don't know what to do
# UniRef/detectron2/utils/memory.py
def retry_if_cuda_oom(func):
"""
Makes a function retry itself after encountering
pytorch's CUDA OOM error.
It will first retry after calling `torch.cuda.empty_cache()`.
If that still fails, it will then retry by trying to convert inputs to CPUs.
In this case, it expects the function to dispatch to CPU implementation.
The return values may become CPU tensors as well and it's user's
responsibility to convert it back to CUDA tensor if needed.
Args:
func: a stateless callable that takes tensor-like objects as arguments
Returns:
a callable which retries `func` if OOM is encountered.
Examples:
::
output = retry_if_cuda_oom(some_torch_function)(input1, input2)
# output may be on CPU even if inputs are on GPU
Note:
1. When converting inputs to CPU, it will only look at each argument and check
if it has `.device` and `.to` for conversion. Nested structures of tensors
are not supported.
2. Since the function might be called more than once, it has to be
stateless.
"""
def maybe_to_cpu(x):
try:
like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
except AttributeError:
like_gpu_tensor = False
if like_gpu_tensor:
return x.to(device="cpu")
else:
return x
@wraps(func)
def wrapped(*args, **kwargs):
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Clear cache and retry
torch.cuda.empty_cache()
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Try on CPU. This slows down the code significantly, therefore print a notice.
logger = logging.getLogger(__name__)
logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func)))
new_args = (maybe_to_cpu(x) for x in args)
new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
return func(*new_args, **new_kwargs)
return wrapped
"""
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
"""
Load content from the given config file and merge it into self.
Args:
cfg_filename: config filename
allow_unsafe: allow unsafe yaml syntax
"""
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
 @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
<fim_suffix>
return wrapped
else:
if from_config is None:
return configurable # @configurable() is made equivalent to @configurable
assert inspect.isfunction(
from_config
), "from_config argument of configurable must be a function!"
def wrapper(orig_func):
@functools.wraps(orig_func)
def wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs)
wrapped.from_config = from_config
return wrapped
return wrapper
def _get_args_from_config(from_config_func, *args, **kwargs):
"""
Use `from_config` to obtain explicit arguments.
Returns:
dict: arguments to be used for cls.__init__
"""
signature = inspect.signature(from_config_func)
if list(signature.parameters.keys())[0] != "cfg":
if inspect.isfunction(from_config_func):
name = from_config_func.__name__
else:
name = f"{from_config_func.__self__}.from_config"
raise TypeError(f"{name} must take 'cfg' as the first argument!")
support_var_arg = any(
param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
for param in signature.parameters.values()
)
if support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs)
return ret
def _called_with_cfg(*args, **kwargs):
"""
Returns:
bool: whether the arguments contain CfgNode and should be considered
forwarded to from_config.
"""
from omegaconf import DictConfig
if len(args) and isinstance(args[0], (_CfgNode, DictConfig)):
return True
if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)):
return True
# `from_config`'s first argument is forced to be "cfg".
# So the above check covers all cases.
return False
<fim_middle>def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs) | def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs) | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
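Editor's illustration (hypothetical, not the detectron2 implementation): the completed target above is the __init__ wrapper installed by @configurable. The deliberately simplified sketch below shows the same dual calling convention, explicit arguments versus a config object translated by from_config; the class and field names are invented.

import functools

class ToyCfg(dict):
    """Minimal stand-in for a config node: attribute access over a plain dict."""
    __getattr__ = dict.__getitem__

def toy_configurable(init_func):
    """Simplified analogue of the decorator in the row above (no error handling)."""
    @functools.wraps(init_func)
    def wrapped(self, *args, **kwargs):
        if args and isinstance(args[0], ToyCfg):
            # cfg-style call: translate cfg -> explicit kwargs via from_config
            init_func(self, **type(self).from_config(*args, **kwargs))
        else:
            # plain call: forward arguments unchanged
            init_func(self, *args, **kwargs)
    return wrapped

class ToyHead:
    @toy_configurable
    def __init__(self, channels, num_classes=80):
        self.channels, self.num_classes = channels, num_classes

    @classmethod
    def from_config(cls, cfg):
        return {"channels": cfg.CHANNELS, "num_classes": cfg.NUM_CLASSES}

if __name__ == "__main__":
    a = ToyHead(256, num_classes=10)                  # regular construction
    b = ToyHead(ToyCfg(CHANNELS=64, NUM_CLASSES=20))  # construction from a cfg
    print(a.channels, a.num_classes, b.channels, b.num_classes)  # 256 10 64 20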
<filename>UniRef/detectron2/structures/masks.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/utils/visualizer.py
def overlay_instances(
self,
*,
boxes=None,
labels=None,
masks=None,
keypoints=None,
assigned_colors=None,
alpha=0.5,
):
"""
Args:
boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
or a :class:`RotatedBoxes`,
or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image,
labels (list[str]): the text to be displayed for each instance.
masks (masks-like object): Supported types are:
* :class:`detectron2.structures.PolygonMasks`,
:class:`detectron2.structures.BitMasks`.
* list[list[ndarray]]: contains the segmentation masks for all objects in one image.
The first level of the list corresponds to individual instances. The second
 level to all the polygons that compose the instance, and the third level
to the polygon coordinates. The third level should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
* list[ndarray]: each ndarray is a binary mask of shape (H, W).
* list[dict]: each dict is a COCO-style RLE.
keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
where the N is the number of instances and K is the number of keypoints.
The last dimension corresponds to (x, y, visibility or score).
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = 0
if boxes is not None:
boxes = self._convert_boxes(boxes)
num_instances = len(boxes)
if masks is not None:
masks = self._convert_masks(masks)
if num_instances:
assert len(masks) == num_instances
else:
num_instances = len(masks)
if keypoints is not None:
if num_instances:
assert len(keypoints) == num_instances
else:
num_instances = len(keypoints)
keypoints = self._convert_keypoints(keypoints)
if labels is not None:
assert len(labels) == num_instances
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
if boxes is not None and boxes.shape[1] == 5:
return self.overlay_rotated_instances(
boxes=boxes, labels=labels, assigned_colors=assigned_colors
)
# Display in largest to smallest order to reduce occlusion.
areas = None
if boxes is not None:
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
elif masks is not None:
areas = np.asarray([x.area() for x in masks])
if areas is not None:
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs] if boxes is not None else None
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
keypoints = keypoints[sorted_idxs] if keypoints is not None else None
for i in range(num_instances):
color = assigned_colors[i]
if boxes is not None:
self.draw_box(boxes[i], edge_color=color)
if masks is not None:
for segment in masks[i].polygons:
self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
if labels is not None:
# first get a box
if boxes is not None:
x0, y0, x1, y1 = boxes[i]
text_pos = (x0, y0) # if drawing boxes, put text on the box corner.
horiz_align = "left"
elif masks is not None:
# skip small mask without polygon
if len(masks[i].polygons) == 0:
continue
x0, y0, x1, y1 = masks[i].bbox()
# draw text in the center (defined by median) when box is not drawn
# median is less sensitive to outliers.
text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
horiz_align = "center"
else:
continue # drawing the box confidence for keypoints isn't very useful.
# for small objects, draw text at the side to avoid occlusion
instance_area = (y1 - y0) * (x1 - x0)
if (
instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
or y1 - y0 < 40 * self.output.scale
):
if y1 >= self.output.height - 5:
text_pos = (x1, y0)
else:
text_pos = (x0, y1)
height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
font_size = (
np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
* 0.5
* self._default_font_size
)
self.draw_text(
labels[i],
text_pos,
color=lighter_color,
horizontal_alignment=horiz_align,
font_size=font_size,
)
# draw keypoints
if keypoints is not None:
for keypoints_per_instance in keypoints:
self.draw_and_connect_keypoints(keypoints_per_instance)
return self.output
# UniRef/detectron2/utils/visualizer.py
def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
"""
Args:
img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
the height and width of the image respectively. C is the number of
color channels. The image is required to be in RGB format since that
is a requirement of the Matplotlib library. The image is also expected
to be in the range [0, 255].
metadata (Metadata): dataset metadata (e.g. class names and colors)
instance_mode (ColorMode): defines one of the pre-defined style for drawing
instances on an image.
"""
self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
if metadata is None:
metadata = MetadataCatalog.get("__nonexist__")
self.metadata = metadata
self.output = VisImage(self.img, scale=scale)
self.cpu_device = torch.device("cpu")
 # too small texts are useless, therefore clamp to a minimum font size
self._default_font_size = max(
np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
)
self._instance_mode = instance_mode
self.keypoint_threshold = _KEYPOINT_THRESHOLD
# UniRef/detectron2/layers/mask_ops.py
def paste_masks_in_image(
masks: torch.Tensor, boxes: torch.Tensor, image_shape: Tuple[int, int], threshold: float = 0.5
):
"""
Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image.
The location, height, and width for pasting each mask is determined by their
corresponding bounding boxes in boxes.
Note:
This is a complicated but more accurate implementation. In actual deployment, it is
often enough to use a faster but less accurate implementation.
See :func:`paste_mask_in_image_old` in this file for an alternative implementation.
Args:
masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of
detected object instances in the image and Hmask, Wmask are the mask width and mask
height of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1].
boxes (Boxes or Tensor): A Boxes of length Bimg or Tensor of shape (Bimg, 4).
boxes[i] and masks[i] correspond to the same object instance.
image_shape (tuple): height, width
threshold (float): A threshold in [0, 1] for converting the (soft) masks to
binary masks.
Returns:
img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the
number of detected object instances and Himage, Wimage are the image width
and height. img_masks[i] is a binary mask for object instance i.
"""
assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported"
N = len(masks)
if N == 0:
return masks.new_empty((0,) + image_shape, dtype=torch.uint8)
if not isinstance(boxes, torch.Tensor):
boxes = boxes.tensor
device = boxes.device
assert len(boxes) == N, boxes.shape
img_h, img_w = image_shape
# The actual implementation split the input into chunks,
# and paste them chunk by chunk.
if device.type == "cpu" or torch.jit.is_scripting():
# CPU is most efficient when they are pasted one by one with skip_empty=True
# so that it performs minimal number of operations.
num_chunks = N
else:
# GPU benefits from parallelism for larger chunks, but may have memory issue
# int(img_h) because shape may be tensors in tracing
num_chunks = int(np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
assert (
num_chunks <= N
), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it"
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
img_masks = torch.zeros(
N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8
)
for inds in chunks:
masks_chunk, spatial_inds = _do_paste_mask(
masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu"
)
if threshold >= 0:
masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
else:
# for visualization and debugging
masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
if torch.jit.is_scripting(): # Scripting does not use the optimized codepath
img_masks[inds] = masks_chunk
else:
img_masks[(inds,) + spatial_inds] = masks_chunk
return img_masks
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
 return np.zeros((height, width)).astype(bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
 return mask_util.decode(rle).astype(bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
 However, we observe no difference in accuracy,
 though BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
 If a mask is empty, its bounding box will be all zero.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
 level of the list corresponds to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
<fim_suffix>
def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around polygon masks.
"""
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy
return Boxes(boxes)
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor:
a BoolTensor which represents whether each mask is empty (False) or not (True).
"""
keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
 return torch.from_numpy(np.asarray(keep, dtype=bool))
def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
"""
Support indexing over the instances and return a `PolygonMasks` object.
`item` can be:
1. An integer. It will return an object with only one instance.
2. A slice. It will return an object with the selected instances.
3. A list[int]. It will return an object with the selected instances,
 corresponding to the indices in the list.
4. A vector mask of type BoolTensor, whose length is num_instances.
It will return an object with the instances whose mask is nonzero.
"""
if isinstance(item, int):
selected_polygons = [self.polygons[item]]
elif isinstance(item, slice):
selected_polygons = self.polygons[item]
elif isinstance(item, list):
selected_polygons = [self.polygons[i] for i in item]
elif isinstance(item, torch.Tensor):
# Polygons is a list, so we have to move the indices back to CPU.
if item.dtype == torch.bool:
assert item.dim() == 1, item.shape
item = item.nonzero().squeeze(1).cpu().numpy().tolist()
elif item.dtype in [torch.int32, torch.int64]:
item = item.cpu().numpy().tolist()
else:
raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
selected_polygons = [self.polygons[i] for i in item]
return PolygonMasks(selected_polygons)
def __iter__(self) -> Iterator[List[np.ndarray]]:
"""
Yields:
list[ndarray]: the polygons for one instance.
Each Tensor is a float64 vector representing a polygon.
"""
return iter(self.polygons)
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.polygons))
return s
def __len__(self) -> int:
return len(self.polygons)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each mask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor: A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = boxes.device
# Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
# (several small tensors for representing a single instance mask)
boxes = boxes.to(torch.device("cpu"))
results = [
rasterize_polygons_within_box(poly, box.numpy(), mask_size)
for poly, box in zip(self.polygons, boxes)
]
"""
poly: list[list[float]], the polygons for one instance
box: a tensor of shape (4,)
"""
if len(results) == 0:
return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
return torch.stack(results, dim=0).to(device=device)
def area(self):
"""
Computes area of the mask.
Only works with Polygons, using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Returns:
Tensor: a vector, area for each instance
"""
area = []
for polygons_per_instance in self.polygons:
area_per_instance = 0
for p in polygons_per_instance:
area_per_instance += polygon_area(p[0::2], p[1::2])
area.append(area_per_instance)
return torch.tensor(area)
@staticmethod
def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
"""
Concatenates a list of PolygonMasks into a single PolygonMasks
Arguments:
polymasks_list (list[PolygonMasks])
Returns:
PolygonMasks: the concatenated PolygonMasks
"""
assert isinstance(polymasks_list, (list, tuple))
assert len(polymasks_list) > 0
assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
cat_polymasks = type(polymasks_list[0])(
list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
)
return cat_polymasks
class ROIMasks:
"""
Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
full-image bitmask can be obtained by "pasting" the mask on the region defined
by the corresponding ROI box.
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor: (N, M, M) mask tensor that defines the mask within each ROI.
"""
if tensor.dim() != 3:
raise ValueError("ROIMasks must take a masks of 3 dimension.")
self.tensor = tensor
def to(self, device: torch.device) -> "ROIMasks":
return ROIMasks(self.tensor.to(device))
@property
def device(self) -> device:
return self.tensor.device
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, item) -> "ROIMasks":
"""
Returns:
ROIMasks: Create a new :class:`ROIMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[2:10]`: return a slice of masks.
2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
t = self.tensor[item]
if t.dim() != 3:
raise ValueError(
f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
)
return ROIMasks(t)
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
@torch.jit.unused
def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
"""
Args: see documentation of :func:`paste_masks_in_image`.
"""
from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape
if torch.jit.is_tracing():
if isinstance(height, torch.Tensor):
paste_func = _paste_masks_tensor_shape
else:
paste_func = paste_masks_in_image
else:
paste_func = retry_if_cuda_oom(paste_masks_in_image)
bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold)
return BitMasks(bitmasks)
<fim_middle>def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64") | def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64") | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
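The PolygonMasks.area() method above delegates the per-polygon computation to a polygon_area(x, y) helper that is not shown in this sample. A minimal shoelace-formula sketch of what such a helper computes, written as an illustrative reimplementation rather than detectron2's own code:

import numpy as np

def polygon_area(x: np.ndarray, y: np.ndarray) -> float:
    # Shoelace formula: 0.5 * |sum_i (x_i * y_{i-1} - y_i * x_{i-1})|
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

# Example: a unit square stored as a flat [x0, y0, x1, y1, ...] polygon,
# matching the polygon_area(p[0::2], p[1::2]) call pattern used in area().
p = np.array([0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0])
print(polygon_area(p[0::2], p[1::2]))  # 1.0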
<filename>UniRef/detectron2/config/config.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/data/transforms/augmentation.py
def _get_aug_input_args(aug, aug_input) -> List[Any]:
"""
Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``.
"""
if aug.input_args is None:
# Decide what attributes are needed automatically
prms = list(inspect.signature(aug.get_transform).parameters.items())
        # The default behavior is: if there is one parameter, then it's "image"
        # (works automatically for the majority of use cases, and also avoids BC breaking);
# Otherwise, use the argument names.
if len(prms) == 1:
names = ("image",)
else:
names = []
for name, prm in prms:
if prm.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
raise TypeError(
f""" \
The default implementation of `{type(aug)}.__call__` does not allow \
`{type(aug)}.get_transform` to use variable-length arguments (*args, **kwargs)! \
If arguments are unknown, reimplement `__call__` instead. \
"""
)
names.append(name)
aug.input_args = tuple(names)
args = []
for f in aug.input_args:
try:
args.append(getattr(aug_input, f))
except AttributeError as e:
raise AttributeError(
f"{type(aug)}.get_transform needs input attribute '{f}', "
f"but it is not an attribute of {type(aug_input)}!"
) from e
return args
# UniRef/detectron2/config/instantiate.py
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
    return cfg # return as-is if we don't know what to do
# UniRef/detectron2/utils/memory.py
def retry_if_cuda_oom(func):
"""
Makes a function retry itself after encountering
pytorch's CUDA OOM error.
It will first retry after calling `torch.cuda.empty_cache()`.
If that still fails, it will then retry by trying to convert inputs to CPUs.
In this case, it expects the function to dispatch to CPU implementation.
    The return values may become CPU tensors as well, and it is the user's
    responsibility to convert them back to CUDA tensors if needed.
Args:
func: a stateless callable that takes tensor-like objects as arguments
Returns:
a callable which retries `func` if OOM is encountered.
Examples:
::
output = retry_if_cuda_oom(some_torch_function)(input1, input2)
# output may be on CPU even if inputs are on GPU
Note:
1. When converting inputs to CPU, it will only look at each argument and check
if it has `.device` and `.to` for conversion. Nested structures of tensors
are not supported.
2. Since the function might be called more than once, it has to be
stateless.
"""
def maybe_to_cpu(x):
try:
like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
except AttributeError:
like_gpu_tensor = False
if like_gpu_tensor:
return x.to(device="cpu")
else:
return x
@wraps(func)
def wrapped(*args, **kwargs):
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Clear cache and retry
torch.cuda.empty_cache()
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Try on CPU. This slows down the code significantly, therefore print a notice.
logger = logging.getLogger(__name__)
logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func)))
new_args = (maybe_to_cpu(x) for x in args)
new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
return func(*new_args, **new_kwargs)
return wrapped
"""
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
"""
Load content from the given config file and merge it into self.
Args:
cfg_filename: config filename
allow_unsafe: allow unsafe yaml syntax
"""
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
        @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
else:
if from_config is None:
return configurable # @configurable() is made equivalent to @configurable
assert inspect.isfunction(
from_config
), "from_config argument of configurable must be a function!"
def wrapper(orig_func):
@functools.wraps(orig_func)
<fim_suffix>
wrapped.from_config = from_config
return wrapped
return wrapper
def _get_args_from_config(from_config_func, *args, **kwargs):
"""
Use `from_config` to obtain explicit arguments.
Returns:
dict: arguments to be used for cls.__init__
"""
signature = inspect.signature(from_config_func)
if list(signature.parameters.keys())[0] != "cfg":
if inspect.isfunction(from_config_func):
name = from_config_func.__name__
else:
name = f"{from_config_func.__self__}.from_config"
raise TypeError(f"{name} must take 'cfg' as the first argument!")
support_var_arg = any(
param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
for param in signature.parameters.values()
)
if support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs)
return ret
def _called_with_cfg(*args, **kwargs):
"""
Returns:
bool: whether the arguments contain CfgNode and should be considered
forwarded to from_config.
"""
from omegaconf import DictConfig
if len(args) and isinstance(args[0], (_CfgNode, DictConfig)):
return True
if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)):
return True
# `from_config`'s first argument is forced to be "cfg".
# So the above check covers all cases.
return False
<fim_middle>def wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs) | def wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs) | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
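A minimal usage sketch of the @configurable decorator completed above, assuming the decorator and CfgNode are importable from detectron2.config; the config keys MODEL.WIDTH and MODEL.DEPTH and the Block class are made up for illustration:

from detectron2.config import CfgNode, configurable

class Block:
    @configurable
    def __init__(self, width, depth=2):
        self.width = width
        self.depth = depth

    @classmethod
    def from_config(cls, cfg):  # 'cfg' must be the first argument
        return {"width": cfg.MODEL.WIDTH, "depth": cfg.MODEL.DEPTH}

cfg = CfgNode({"MODEL": {"WIDTH": 64, "DEPTH": 4}})
b1 = Block(width=32)      # regular construction
b2 = Block(cfg)           # arguments filled in by from_config
b3 = Block(cfg, depth=8)  # cfg values plus an explicit overwrite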
<filename>UniRef/detectron2/utils/registry.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/data/catalog.py
def get(self, name):
"""
Call the registered function and return its results.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
Returns:
list[dict]: dataset annotations.
"""
try:
f = self[name]
except KeyError as e:
raise KeyError(
"Dataset '{}' is not registered! Available datasets are: {}".format(
name, ", ".join(list(self.keys()))
)
) from e
return f()
# UniRef/detectron2/data/detection_utils.py
def _apply_exif_orientation(image):
"""
Applies the exif orientation correctly.
This code exists per the bug:
https://github.com/python-pillow/Pillow/issues/3973
with the function `ImageOps.exif_transpose`. The Pillow source raises errors with
various methods, especially `tobytes`
Function based on:
https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527
Args:
image (PIL.Image): a PIL image
Returns:
(PIL.Image): the PIL image with exif orientation applied, if applicable
"""
if not hasattr(image, "getexif"):
return image
try:
exif = image.getexif()
except Exception: # https://github.com/facebookresearch/detectron2/issues/1885
exif = None
if exif is None:
return image
orientation = exif.get(_EXIF_ORIENT)
method = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
return image.transpose(method)
return image
# UniRef/detectron2/structures/instances.py
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances":
"""
Args:
item: an index-like object and will be used to index all the fields.
Returns:
If `item` is a string, return the data in the corresponding field.
Otherwise, returns an `Instances` where all fields are indexed by `item`.
"""
if type(item) == int:
if item >= len(self) or item < -len(self):
raise IndexError("Instances index out of range!")
else:
item = slice(item, None, len(self))
ret = Instances(self._image_size)
for k, v in self._fields.items():
ret.set(k, v[item])
return ret
"""
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any
import pydoc
from fvcore.common.registry import Registry # for backward compatibility.
"""
``Registry`` and `locate` provide ways to map a string (typically found
in config files) to callable objects.
"""
__all__ = ["Registry", "locate"]
def _convert_target_to_string(t: Any) -> str:
"""
Inverse of ``locate()``.
Args:
t: any object with ``__module__`` and ``__qualname__``
"""
module, qualname = t.__module__, t.__qualname__
# Compress the path to this object, e.g. ``module.submodule._impl.class``
    # may become ``module.submodule.class``, if the latter also resolves to the same
# object. This simplifies the string, and also is less affected by moving the
# class implementation.
module_parts = module.split(".")
for k in range(1, len(module_parts)):
prefix = ".".join(module_parts[:k])
candidate = f"{prefix}.{qualname}"
try:
if locate(candidate) is t:
return candidate
except ImportError:
pass
return f"{module}.{qualname}"
def locate(name: str) -> Any:
"""
Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``,
such as "module.submodule.class_name".
Raise Exception if it cannot be found.
"""
obj = pydoc.locate(name)
# Some cases (e.g. torch.optim.sgd.SGD) not handled correctly
# by pydoc.locate. Try a private function from hydra.
if obj is None:
<fim_suffix>
except ImportError as e:
raise ImportError(f"Cannot dynamically locate object {name}!") from e
else:
obj = _locate(name) # it raises if fails
return obj
<fim_middle>try:
# from hydra.utils import get_method - will print many errors
from hydra.utils import _locate | try:
# from hydra.utils import get_method - will print many errors
from hydra.utils import _locate | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
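A small round-trip sketch of the locate() and _convert_target_to_string() helpers shown above; torch.nn.Linear is used purely as an example target:

import torch
from detectron2.utils.registry import locate, _convert_target_to_string

cls = locate("torch.nn.Linear")        # dotted string -> object
assert cls is torch.nn.Linear
name = _convert_target_to_string(cls)  # object -> (possibly compressed) dotted string
assert locate(name) is torch.nn.Linear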
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/config/lazy.py
def __call__(self, **kwargs):
if is_dataclass(self._target):
# omegaconf object cannot hold dataclass type
# https://github.com/omry/omegaconf/issues/784
target = _convert_target_to_string(self._target)
else:
target = self._target
kwargs["_target_"] = target
return DictConfig(content=kwargs, flags={"allow_objects": True})
# UniRef/detectron2/config/lazy.py
def __init__(self, target):
if not (callable(target) or isinstance(target, (str, abc.Mapping))):
raise TypeError(
f"target of LazyCall must be a callable or defines a callable! Got {target}"
)
self._target = target
# UniRef/detectron2/engine/launch.py
def _distributed_worker(
local_rank,
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout=DEFAULT_TIMEOUT,
):
assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
global_rank = machine_rank * num_gpus_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
timeout=timeout,
)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error("Process group URL: {}".format(dist_url))
raise e
# Setup the local process group (which contains ranks within the same machine)
assert comm._LOCAL_PROCESS_GROUP is None
num_machines = world_size // num_gpus_per_machine
for i in range(num_machines):
ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
pg = dist.new_group(ranks_on_i)
if i == machine_rank:
comm._LOCAL_PROCESS_GROUP = pg
assert num_gpus_per_machine <= torch.cuda.device_count()
torch.cuda.set_device(local_rank)
# synchronize is needed here to prevent a possible timeout after calling init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
main_func(*args)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
<fim_suffix>
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
    return cfg # return as-is if we don't know what to do
<fim_middle>try:
return cls(**cfg) | try:
return cls(**cfg) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
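A short sketch of driving the instantiate() function shown above with a "_target_" mapping, both through LazyCall (from the retrieved lazy.py snippet) and through a plain dict; torch.nn.Linear is only an example target:

import torch
from detectron2.config import LazyCall as L, instantiate

cfg = L(torch.nn.Linear)(in_features=16, out_features=4)  # DictConfig carrying _target_
layer = instantiate(cfg)                                   # -> torch.nn.Linear(16, 4)
assert isinstance(layer, torch.nn.Linear)

# A plain mapping with a dotted "_target_" string works as well, since
# instantiate() resolves strings with locate():
layer2 = instantiate({"_target_": "torch.nn.Linear", "in_features": 16, "out_features": 4})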
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/config/lazy.py
def __call__(self, **kwargs):
if is_dataclass(self._target):
# omegaconf object cannot hold dataclass type
# https://github.com/omry/omegaconf/issues/784
target = _convert_target_to_string(self._target)
else:
target = self._target
kwargs["_target_"] = target
return DictConfig(content=kwargs, flags={"allow_objects": True})
# UniRef/detectron2/config/lazy.py
def __init__(self, target):
if not (callable(target) or isinstance(target, (str, abc.Mapping))):
raise TypeError(
f"target of LazyCall must be a callable or defines a callable! Got {target}"
)
self._target = target
# UniRef/detectron2/engine/launch.py
def _distributed_worker(
local_rank,
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout=DEFAULT_TIMEOUT,
):
assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
global_rank = machine_rank * num_gpus_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
timeout=timeout,
)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error("Process group URL: {}".format(dist_url))
raise e
# Setup the local process group (which contains ranks within the same machine)
assert comm._LOCAL_PROCESS_GROUP is None
num_machines = world_size // num_gpus_per_machine
for i in range(num_machines):
ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
pg = dist.new_group(ranks_on_i)
if i == machine_rank:
comm._LOCAL_PROCESS_GROUP = pg
assert num_gpus_per_machine <= torch.cuda.device_count()
torch.cuda.set_device(local_rank)
# synchronize is needed here to prevent a possible timeout after calling init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
main_func(*args)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
<fim_suffix>
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
    return cfg # return as-is if we don't know what to do
<fim_middle>try:
cls_name = cls.__module__ + "." + cls.__qualname__ | try:
cls_name = cls.__module__ + "." + cls.__qualname__ | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/config/config.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/data/transforms/augmentation.py
def _get_aug_input_args(aug, aug_input) -> List[Any]:
"""
Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``.
"""
if aug.input_args is None:
# Decide what attributes are needed automatically
prms = list(inspect.signature(aug.get_transform).parameters.items())
        # The default behavior is: if there is one parameter, then it's "image"
        # (works automatically for the majority of use cases, and also avoids BC breaking);
# Otherwise, use the argument names.
if len(prms) == 1:
names = ("image",)
else:
names = []
for name, prm in prms:
if prm.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
raise TypeError(
f""" \
The default implementation of `{type(aug)}.__call__` does not allow \
`{type(aug)}.get_transform` to use variable-length arguments (*args, **kwargs)! \
If arguments are unknown, reimplement `__call__` instead. \
"""
)
names.append(name)
aug.input_args = tuple(names)
args = []
for f in aug.input_args:
try:
args.append(getattr(aug_input, f))
except AttributeError as e:
raise AttributeError(
f"{type(aug)}.get_transform needs input attribute '{f}', "
f"but it is not an attribute of {type(aug_input)}!"
) from e
return args
# UniRef/detectron2/config/instantiate.py
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
    return cfg # return as-is if we don't know what to do
# UniRef/detectron2/utils/memory.py
def retry_if_cuda_oom(func):
"""
Makes a function retry itself after encountering
pytorch's CUDA OOM error.
It will first retry after calling `torch.cuda.empty_cache()`.
If that still fails, it will then retry by trying to convert inputs to CPUs.
In this case, it expects the function to dispatch to CPU implementation.
    The return values may become CPU tensors as well, and it is the user's
    responsibility to convert them back to CUDA tensors if needed.
Args:
func: a stateless callable that takes tensor-like objects as arguments
Returns:
a callable which retries `func` if OOM is encountered.
Examples:
::
output = retry_if_cuda_oom(some_torch_function)(input1, input2)
# output may be on CPU even if inputs are on GPU
Note:
1. When converting inputs to CPU, it will only look at each argument and check
if it has `.device` and `.to` for conversion. Nested structures of tensors
are not supported.
2. Since the function might be called more than once, it has to be
stateless.
"""
def maybe_to_cpu(x):
try:
like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
except AttributeError:
like_gpu_tensor = False
if like_gpu_tensor:
return x.to(device="cpu")
else:
return x
@wraps(func)
def wrapped(*args, **kwargs):
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Clear cache and retry
torch.cuda.empty_cache()
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Try on CPU. This slows down the code significantly, therefore print a notice.
logger = logging.getLogger(__name__)
logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func)))
new_args = (maybe_to_cpu(x) for x in args)
new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
return func(*new_args, **new_kwargs)
return wrapped
"""
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
"""
Load content from the given config file and merge it into self.
Args:
cfg_filename: config filename
allow_unsafe: allow unsafe yaml syntax
"""
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
        @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
<fim_suffix>
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
else:
if from_config is None:
return configurable # @configurable() is made equivalent to @configurable
assert inspect.isfunction(
from_config
), "from_config argument of configurable must be a function!"
def wrapper(orig_func):
@functools.wraps(orig_func)
def wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs)
wrapped.from_config = from_config
return wrapped
return wrapper
def _get_args_from_config(from_config_func, *args, **kwargs):
"""
Use `from_config` to obtain explicit arguments.
Returns:
dict: arguments to be used for cls.__init__
"""
signature = inspect.signature(from_config_func)
if list(signature.parameters.keys())[0] != "cfg":
if inspect.isfunction(from_config_func):
name = from_config_func.__name__
else:
name = f"{from_config_func.__self__}.from_config"
raise TypeError(f"{name} must take 'cfg' as the first argument!")
support_var_arg = any(
param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
for param in signature.parameters.values()
)
if support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs)
return ret
def _called_with_cfg(*args, **kwargs):
"""
Returns:
bool: whether the arguments contain CfgNode and should be considered
forwarded to from_config.
"""
from omegaconf import DictConfig
if len(args) and isinstance(args[0], (_CfgNode, DictConfig)):
return True
if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)):
return True
# `from_config`'s first argument is forced to be "cfg".
# So the above check covers all cases.
return False
<fim_middle>try:
from_config_func = type(self).from_config | try:
from_config_func = type(self).from_config | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
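The retrieved memory.py snippet in the row above documents retry_if_cuda_oom; a small illustrative usage sketch (the pairwise_scores matmul workload is made up for the example):

import torch
from detectron2.utils.memory import retry_if_cuda_oom

def pairwise_scores(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    return a @ b.t()

a, b = torch.randn(8, 4), torch.randn(8, 4)
# On CUDA OOM the wrapper retries after emptying the cache, then falls back to
# CPU inputs, so the result may come back as a CPU tensor.
scores = retry_if_cuda_oom(pairwise_scores)(a, b)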
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/config/lazy.py
def __call__(self, **kwargs):
if is_dataclass(self._target):
# omegaconf object cannot hold dataclass type
# https://github.com/omry/omegaconf/issues/784
target = _convert_target_to_string(self._target)
else:
target = self._target
kwargs["_target_"] = target
return DictConfig(content=kwargs, flags={"allow_objects": True})
# UniRef/detectron2/config/lazy.py
def __init__(self, target):
if not (callable(target) or isinstance(target, (str, abc.Mapping))):
raise TypeError(
f"target of LazyCall must be a callable or defines a callable! Got {target}"
)
self._target = target
# UniRef/detectron2/engine/launch.py
def _distributed_worker(
local_rank,
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout=DEFAULT_TIMEOUT,
):
assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
global_rank = machine_rank * num_gpus_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
timeout=timeout,
)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error("Process group URL: {}".format(dist_url))
raise e
# Setup the local process group (which contains ranks within the same machine)
assert comm._LOCAL_PROCESS_GROUP is None
num_machines = world_size // num_gpus_per_machine
for i in range(num_machines):
ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
pg = dist.new_group(ranks_on_i)
if i == machine_rank:
comm._LOCAL_PROCESS_GROUP = pg
assert num_gpus_per_machine <= torch.cuda.device_count()
torch.cuda.set_device(local_rank)
# synchronize is needed here to prevent a possible timeout after calling init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
main_func(*args)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
<fim_suffix>
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
    return cfg # return as-is if we don't know what to do
<fim_middle>except Exception:
# target could be anything, so the above could fail
cls_name = str(cls) | except Exception:
# target could be anything, so the above could fail
cls_name = str(cls) | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/utils/registry.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/data/catalog.py
def get(self, name):
"""
Call the registered function and return its results.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
Returns:
list[dict]: dataset annotations.
"""
try:
f = self[name]
except KeyError as e:
raise KeyError(
"Dataset '{}' is not registered! Available datasets are: {}".format(
name, ", ".join(list(self.keys()))
)
) from e
return f()
# UniRef/detectron2/data/detection_utils.py
def _apply_exif_orientation(image):
"""
Applies the exif orientation correctly.
This code exists per the bug:
https://github.com/python-pillow/Pillow/issues/3973
with the function `ImageOps.exif_transpose`. The Pillow source raises errors with
various methods, especially `tobytes`
Function based on:
https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527
Args:
image (PIL.Image): a PIL image
Returns:
(PIL.Image): the PIL image with exif orientation applied, if applicable
"""
if not hasattr(image, "getexif"):
return image
try:
exif = image.getexif()
except Exception: # https://github.com/facebookresearch/detectron2/issues/1885
exif = None
if exif is None:
return image
orientation = exif.get(_EXIF_ORIENT)
method = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
return image.transpose(method)
return image
# UniRef/detectron2/structures/instances.py
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances":
"""
Args:
item: an index-like object and will be used to index all the fields.
Returns:
If `item` is a string, return the data in the corresponding field.
Otherwise, returns an `Instances` where all fields are indexed by `item`.
"""
if type(item) == int:
if item >= len(self) or item < -len(self):
raise IndexError("Instances index out of range!")
else:
item = slice(item, None, len(self))
ret = Instances(self._image_size)
for k, v in self._fields.items():
ret.set(k, v[item])
return ret
"""
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any
import pydoc
from fvcore.common.registry import Registry # for backward compatibility.
"""
``Registry`` and `locate` provide ways to map a string (typically found
in config files) to callable objects.
"""
__all__ = ["Registry", "locate"]
def _convert_target_to_string(t: Any) -> str:
"""
Inverse of ``locate()``.
Args:
t: any object with ``__module__`` and ``__qualname__``
"""
module, qualname = t.__module__, t.__qualname__
# Compress the path to this object, e.g. ``module.submodule._impl.class``
    # may become ``module.submodule.class``, if the latter also resolves to the same
# object. This simplifies the string, and also is less affected by moving the
# class implementation.
module_parts = module.split(".")
for k in range(1, len(module_parts)):
prefix = ".".join(module_parts[:k])
candidate = f"{prefix}.{qualname}"
try:
if locate(candidate) is t:
return candidate
except ImportError:
pass
return f"{module}.{qualname}"
def locate(name: str) -> Any:
"""
Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``,
such as "module.submodule.class_name".
Raise Exception if it cannot be found.
"""
obj = pydoc.locate(name)
# Some cases (e.g. torch.optim.sgd.SGD) not handled correctly
# by pydoc.locate. Try a private function from hydra.
if obj is None:
try:
# from hydra.utils import get_method - will print many errors
from hydra.utils import _locate
<fim_suffix>
else:
obj = _locate(name) # it raises if fails
return obj
<fim_middle>except ImportError as e:
raise ImportError(f"Cannot dynamically locate object {name}!") from e | except ImportError as e:
raise ImportError(f"Cannot dynamically locate object {name}!") from e | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/config/config.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/data/transforms/augmentation.py
def _get_aug_input_args(aug, aug_input) -> List[Any]:
"""
Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``.
"""
if aug.input_args is None:
# Decide what attributes are needed automatically
prms = list(inspect.signature(aug.get_transform).parameters.items())
        # The default behavior is: if there is one parameter, then it's "image"
        # (works automatically for the majority of use cases, and also avoids BC breaking);
# Otherwise, use the argument names.
if len(prms) == 1:
names = ("image",)
else:
names = []
for name, prm in prms:
if prm.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
raise TypeError(
f""" \
The default implementation of `{type(aug)}.__call__` does not allow \
`{type(aug)}.get_transform` to use variable-length arguments (*args, **kwargs)! \
If arguments are unknown, reimplement `__call__` instead. \
"""
)
names.append(name)
aug.input_args = tuple(names)
args = []
for f in aug.input_args:
try:
args.append(getattr(aug_input, f))
except AttributeError as e:
raise AttributeError(
f"{type(aug)}.get_transform needs input attribute '{f}', "
f"but it is not an attribute of {type(aug_input)}!"
) from e
return args
# UniRef/detectron2/config/instantiate.py
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
    return cfg # return as-is if we don't know what to do
# UniRef/detectron2/utils/memory.py
def retry_if_cuda_oom(func):
"""
Makes a function retry itself after encountering
pytorch's CUDA OOM error.
It will first retry after calling `torch.cuda.empty_cache()`.
If that still fails, it will then retry by trying to convert inputs to CPUs.
In this case, it expects the function to dispatch to CPU implementation.
    The return values may become CPU tensors as well, and it is the user's
    responsibility to convert them back to CUDA tensors if needed.
Args:
func: a stateless callable that takes tensor-like objects as arguments
Returns:
a callable which retries `func` if OOM is encountered.
Examples:
::
output = retry_if_cuda_oom(some_torch_function)(input1, input2)
# output may be on CPU even if inputs are on GPU
Note:
1. When converting inputs to CPU, it will only look at each argument and check
if it has `.device` and `.to` for conversion. Nested structures of tensors
are not supported.
2. Since the function might be called more than once, it has to be
stateless.
"""
def maybe_to_cpu(x):
try:
like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
except AttributeError:
like_gpu_tensor = False
if like_gpu_tensor:
return x.to(device="cpu")
else:
return x
@wraps(func)
def wrapped(*args, **kwargs):
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Clear cache and retry
torch.cuda.empty_cache()
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Try on CPU. This slows down the code significantly, therefore print a notice.
logger = logging.getLogger(__name__)
logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func)))
new_args = (maybe_to_cpu(x) for x in args)
new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
return func(*new_args, **new_kwargs)
return wrapped
"""
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
"""
Load content from the given config file and merge it into self.
Args:
cfg_filename: config filename
allow_unsafe: allow unsafe yaml syntax
"""
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
        @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
<fim_suffix>
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
else:
if from_config is None:
return configurable # @configurable() is made equivalent to @configurable
assert inspect.isfunction(
from_config
), "from_config argument of configurable must be a function!"
def wrapper(orig_func):
@functools.wraps(orig_func)
def wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs)
wrapped.from_config = from_config
return wrapped
return wrapper
def _get_args_from_config(from_config_func, *args, **kwargs):
"""
Use `from_config` to obtain explicit arguments.
Returns:
dict: arguments to be used for cls.__init__
"""
signature = inspect.signature(from_config_func)
if list(signature.parameters.keys())[0] != "cfg":
if inspect.isfunction(from_config_func):
name = from_config_func.__name__
else:
name = f"{from_config_func.__self__}.from_config"
raise TypeError(f"{name} must take 'cfg' as the first argument!")
support_var_arg = any(
param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
for param in signature.parameters.values()
)
if support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs)
return ret
def _called_with_cfg(*args, **kwargs):
"""
Returns:
bool: whether the arguments contain CfgNode and should be considered
forwarded to from_config.
"""
from omegaconf import DictConfig
if len(args) and isinstance(args[0], (_CfgNode, DictConfig)):
return True
if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)):
return True
# `from_config`'s first argument is forced to be "cfg".
# So the above check covers all cases.
return False
<fim_middle>except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e | except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# UniRef/detectron2/config/lazy.py
def __call__(self, **kwargs):
if is_dataclass(self._target):
# omegaconf object cannot hold dataclass type
# https://github.com/omry/omegaconf/issues/784
target = _convert_target_to_string(self._target)
else:
target = self._target
kwargs["_target_"] = target
return DictConfig(content=kwargs, flags={"allow_objects": True})
# UniRef/detectron2/config/lazy.py
def __init__(self, target):
if not (callable(target) or isinstance(target, (str, abc.Mapping))):
raise TypeError(
f"target of LazyCall must be a callable or defines a callable! Got {target}"
)
self._target = target
# UniRef/detectron2/engine/launch.py
def _distributed_worker(
local_rank,
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout=DEFAULT_TIMEOUT,
):
assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
global_rank = machine_rank * num_gpus_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
timeout=timeout,
)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error("Process group URL: {}".format(dist_url))
raise e
# Setup the local process group (which contains ranks within the same machine)
assert comm._LOCAL_PROCESS_GROUP is None
num_machines = world_size // num_gpus_per_machine
for i in range(num_machines):
ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
pg = dist.new_group(ranks_on_i)
if i == machine_rank:
comm._LOCAL_PROCESS_GROUP = pg
assert num_gpus_per_machine <= torch.cuda.device_count()
torch.cuda.set_device(local_rank)
# synchronize is needed here to prevent a possible timeout after calling init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
main_func(*args)
"""
# Copyright (c) Facebook, Inc. and its affiliates.
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
<fim_suffix>
return cfg # return as-is if don't know what to do
<fim_middle>except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise | except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
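The row above completes `instantiate()`, which resolves the callable named by `_target_` and forwards the remaining keys as keyword arguments. A minimal usage sketch, assuming only the behavior documented in that row; the `_target_` value and toy keys below are illustrative, not taken from the dataset:

```python
from detectron2.config.instantiate import instantiate

# A dict-like config: "_target_" names any importable callable and the other
# keys become keyword arguments. collections.OrderedDict is just a stand-in.
cfg = {"_target_": "collections.OrderedDict", "one": 1, "two": 2}

obj = instantiate(cfg)  # roughly collections.OrderedDict(one=1, two=2)
print(obj)
```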
<filename>camp_zipnerf/internal/spin_math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/render.py
def gaussianize_frustum(t0, t1):
"""Convert intervals along a conical frustum into means and variances."""
# A more stable version of Equation 7 from https://arxiv.org/abs/2103.13415.
s = t0 + t1
d = t1 - t0
eps = jnp.finfo(jnp.float32).eps ** 2
ratio = d**2 / jnp.maximum(eps, 3 * s**2 + d**2)
t_mean = s * (1 / 2 + ratio)
t_var = (1 / 12) * d**2 - (1 / 15) * ratio**2 * (12 * s**2 - d**2)
r_var = (1 / 16) * s**2 + d**2 * (5 / 48 - (1 / 15) * ratio)
return t_mean, t_var, r_var
# camp_zipnerf/internal/geometry.py
def ray_sphere_intersection(origin,
direction,
radius = 1.0):
"""Computes the intersecting point between a ray and a sphere.
Variables use notation from Wikipedia:
u: direction of ray
o: origin of ray
References:
https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection
Args:
origin: The origin of the ray.
direction: The direction of the ray.
radius: The radius of the sphere.
Returns:
The intersecting point on the sphere.
"""
u_dot_o = jnp.sum(direction * origin, axis=-1, keepdims=True)
nabla = u_dot_o**2 - (jnp.linalg.norm(origin, keepdims=True)**2 - radius**2)
# Since this is a ray and not a line, we only need to consider the case where
# nabla is positive.
distance = -u_dot_o + jnp.sqrt(nabla)
return origin + distance * direction
# camp_zipnerf/internal/rigid_body.py
def rotation_matrix_from_ortho6d(ortho6d):
"""Computes the 3D rotation matrix from the 6D representation.
Zhou et al. have proposed a novel 6D representation for the rotation in
SO(3) which is completely continuous. This is highly beneficial and produces
better results than most standard rotation representations for many tasks,
especially when the predicted value is close to the discontinuity of the
utilized rotation representation. This function converts from the proposed 6
dimensional representation to the classic 3x3 rotation matrix.
See https://arxiv.org/pdf/1812.07035.pdf for more information.
Args:
ortho6d: 6D representation for the rotation according to Zhou et al. of shape
[6].
Returns:
(3, 3) The associated 3x3 rotation matrices.
"""
if ortho6d.ndim != 1 or ortho6d.shape[0] != 6:
raise ValueError('The shape of the input ortho 6D vector needs to be (6).')
a1, a2 = ortho6d[Ellipsis, :3], ortho6d[Ellipsis, 3:]
b1 = spin_math.normalize(a1)
b2 = a2 - jnp.sum(b1 * a2, axis=-1, keepdims=True) * b1
b2 = spin_math.normalize(b2)
b3 = jnp.cross(b1, b2)
return jnp.stack((b1, b2, b3), axis=-2)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pyformat: mode=yapf
"""Math utility functions."""
from typing import Optional, Union
from internal import math
import jax
from jax import numpy as jnp
import optax
def matmul(a, b):
"""jnp.matmul defaults to bfloat16 on TPU, but this doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def safe_sqrt(x,
*,
eps = jnp.finfo(jnp.float32).eps,
value_at_zero = 0.0):
"""A safe version of jnp.sqrt that avoid evaluating at zero.
Note: sqrt(x) = sqrt(eps) = 3e-4 when x < eps = 1.19e-7.
Args:
x: The operand.
eps: A small number to prevent NaNs.
value_at_zero: The value to clamp x to near zero. The return value will be
sqrt(value_at_zero)
Returns:
The sqrt(x), or sqrt(value_at_zero) near zero.
"""
safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero))
return jnp.sqrt(safe_x)
def safe_acos(t,
eps = jnp.finfo(jnp.float32).eps):
"""A safe version of arccos which avoids evaluating at -1 or 1."""
return jnp.arccos(jnp.clip(t, -1.0 + eps, 1.0 - eps))
def safe_log(x,
*,
eps = jnp.finfo(jnp.float32).eps,
value_at_zero = jnp.finfo(jnp.float32).eps):
"""Computes a safe log that avoids evaluating at zero.
Args:
x: Input array.
eps: A small number to prevent NaNs.
value_at_zero: The value to clamp x to near zero. The return value will be
log(value_at_zero)
Returns:
log(x) or log(value_at_zero) near zero.
"""
safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero))
return jnp.log(safe_x)
def normalize(
x,
axis = -1,
# pylint: disable=redefined-builtin
ord = None,
eps = jnp.finfo(jnp.float32).eps,
):
"""Normalize a vector."""
return x / optax.safe_norm(x, axis=axis, ord=ord, min_norm=eps, keepdims=True)
def inv_sqrtm(
matrix,
normalize_eigvals = False,
):
"""Takes the inverse matrix square root of a PSD matrix.
Forked from `coord.sqrtm`.
Args:
matrix: (..., d, d) A positive semi-definite matrix.
normalize_eigvals: If True, normalize the eigenvalues by the geometric mean.
Returns:
The inverse square root of the matrix, and (eigvec, eigval) if return_eigs
is True.
"""
eigvec, eigval = jax.lax.linalg.eigh(
matrix, symmetrize_input=False, sort_eigenvalues=False)
if normalize_eigvals:
# Equivalent to dividing by geometric mean, but numerically stabler.
log_eigval = jnp.log(eigval)
eigval = jnp.exp(log_eigval - jnp.mean(log_eigval, axis=-1, keepdims=True))
scaling = math.safe_div(1, math.safe_sqrt(eigval))
scaling = scaling[Ellipsis, None, :]
sqrtm_mat = matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1))
return sqrtm_mat, (eigvec, eigval)
def to_homogeneous(v):
"""Converts a vector to a homogeneous representation.
Args:
v: (*, C) A non-homogeneous vector.
Returns:
(*, C+1) A homogeneous version of v.
"""
return jnp.concatenate([v, jnp.ones_like(v[Ellipsis, :1])], axis=-1)
def from_homogeneous(v):
"""Converts a homogeneous vector to a non-homogeneous vector.
Args:
v: (*, C+1) A homogeneous vector.
Returns:
(*, C) The non-homogeneous version of v.
"""
return v[Ellipsis, :-1] / v[Ellipsis, -1:]
def apply_homogeneous_transform(transform,
vectors):
"""Apply a homogeneous transformation to a collection of vectors.
Args:
transform: (C+1,C+1) A homogeneous transformation matrix.
vectors: (*,C) An array containing 3D points.
Returns:
(*,C) The points transformed by the array.
"""
vectors_h = to_homogeneous(vectors.reshape((-1, vectors.shape[-1])))
transformed = from_homogeneous(matmul(transform, vectors_h.T).T)
return transformed.reshape(vectors.shape)
def generalized_bias_and_gain(x, slope,
threshold):
<fim_suffix>
eps = jnp.finfo(jnp.float32).tiny
left_curve = (threshold * x) / (x + slope * (threshold - x) + eps)
right_curve = ((1 - threshold) * (x - 1) / (1 - x - slope *
(threshold - x) + eps) + 1)
return jnp.where(x < threshold, left_curve, right_curve)
<fim_middle>"""Maps the input according to the generalized bias and gain function.
References:
https://arxiv.org/abs/2010.09714
Args:
x: The inputs array with values in [0, 1] to map.
slope: The slope parameter of the curve which controls the slope of the
curve at the threshold.
threshold: The value at which `x` reverses its shape, and the point at which
the output is guaranteed to be equal to the input.
Returns:
The output of the curve at each input point `x`.
""" | """Maps the input according to the generalized bias and gain function.
References:
https://arxiv.org/abs/2010.09714
Args:
x: The inputs array with values in [0, 1] to map.
slope: The slope parameter of the curve which controls the slope of the
curve at the threshold.
threshold: The value at which `x` reverses its shape, and the point at which
the output is guaranteed to be equal to the input.
Returns:
The output of the curve at each input point `x`.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/coord.py
def track_isotropic(fn, mean, scale):
"""Apply function `fn` to a set of means and scales, ala a Kalman filter.
This is the isotropic or scalar equivalent of track_linearize, as we're still
linearizing a function and tracking a Gaussian through it, but the input and
output Gaussians are all isotropic and are only represented with a single
`scale` value (where `scale**2` is the variance of the Gaussian).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
scale: a tensor of scales, with the same shape as means[..., -1].
Returns:
fn_mean: the transformed means.
fn_scale: the transformed scales.
"""
if mean.shape[:-1] != scale.shape:
raise ValueError(
f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
)
d = mean.shape[-1]
fn_mean, lin_fn = jax.linearize(fn, mean)
if scale is not None:
# Compute the Jacobian of fn function at the locations of each mean.
jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
)
# The cube root of the determinant of the Jacobian is the geometric mean
# of the eigenvalues of the Jacobian, which gives us the isotropic scaling
# implied by `fn` at each mean that `scale` should be multiplied by.
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0.
abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
# Special case d == 3 for speed's sake.
fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
else:
fn_scale = None
return fn_mean, fn_scale
# camp_zipnerf/internal/geometry.py
def line_distance(point1, dir1, point2,
dir2):
"""Compute the distance between two lines in 3D.
Note that this is the distance between lines and not line segments or rays;
i.e., it does not consider endpoints and will compute the distance assuming
the line extends infinitely in both directions.
Args:
point1: (3,) a point on the first line.
dir1: (3,) the direction vector of the first line.
point2: (3,) a point on the second line.
dir2: (3,) the direction vector of the second line.
Returns:
The distance between the two lines.
"""
is_parallel = are_lines_parallel(dir1, dir2)
skew_dist = skew_line_distance(point1, dir1, point2, dir2)
parallel_dist = line_to_point_distance(point1, dir1, point2)
return jnp.where(is_parallel, parallel_dist, skew_dist)
# camp_zipnerf/internal/ref_utils.py
def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps):
"""Normalize x to unit length along last axis.
Normalizing vectors is surprisingly tricky, because you have to address the
case where the denominator in the normalization is tiny or zero, in which case
gradients will explode. For this reason, we perform two normalizations: in the
forward pass, we clamp the denominator with ~1e-40, but in the backward pass
we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the
output of this function is unit norm (unless x is very very small) while
preventing exploding gradients.
Args:
x: The array of values to normalize.
grad_eps: The value to clip the squared norm by before division in the
backward pass.
Returns:
A normalized array x / ||x||, normalized along the last axis.
"""
tiny = jnp.finfo(jnp.float32).tiny
grad_eps = jnp.maximum(tiny, grad_eps)
denom_sq = jnp.sum(x**2, axis=-1, keepdims=True)
normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq))
normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq))
# Use `normal_val` in the forward pass but `normal_grad` in the backward pass.
normal = math_lib.override_gradient(normal_val, normal_grad)
return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating step functions (piecewise-constant 1D functions).
We have a shared naming and dimension convention for these functions.
All input/output step functions are assumed to be aligned along the last axis.
`t` always indicates the x coordinates of the *endpoints* of a step function.
`y` indicates unconstrained values for the *bins* of a step function
`w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin
values that *integrate* to <= 1.
"""
from internal import linspline
from internal import math
from internal import utils
import jax
import jax.numpy as jnp
import numpy as np
def query(tq, t, y, left=None, right=None):
"""Query step function (t, y) at locations tq. Edges repeat by default."""
utils.assert_valid_stepfun(t, y)
# Query the step function to recover the interval value.
(i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu())
# Apply boundary conditions.
left = y[Ellipsis, :1] if left is None else left
right = y[Ellipsis, -1:] if right is None else right
yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq)
return yq
def weight_to_pdf(t, w):
"""Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
utils.assert_valid_stepfun(t, w)
td = jnp.diff(t)
return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td))
def pdf_to_weight(t, p):
"""Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
utils.assert_valid_stepfun(t, p)
return p * jnp.diff(t)
def integrate_weights(w):
<fim_suffix>
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
def invert_cdf(u, t, w_logits):
"""Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
utils.assert_valid_stepfun(t, w_logits)
# Compute the PDF and CDF for each weight vector.
w = jax.nn.softmax(w_logits, axis=-1)
cw = integrate_weights(w)
# Interpolate into the inverse CDF.
t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu())
return t_new
def sample(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
deterministic_center=False,
eps=jnp.finfo(jnp.float32).eps,
):
"""Piecewise-Constant PDF sampling from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of samples.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
deterministic_center: bool, if False, when `rng` is None return samples that
linspace the entire PDF. If True, skip the front and back of the linspace
so that the centers of each PDF interval are returned.
eps: float, something like numerical epsilon.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
# Draw uniform samples.
if rng is None:
# Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
if deterministic_center:
pad = 1 / (2 * num_samples)
u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
else:
u = jnp.linspace(0, 1.0 - eps, num_samples)
u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
else:
# `u` is in [0, 1) --- it can be zero, but it can never be 1.
u_max = eps + (1 - eps) / num_samples
max_jitter = (1 - u_max) / (num_samples - 1) - eps
d = 1 if single_jitter else num_samples
u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
rng, t.shape[:-1] + (d,), maxval=max_jitter
)
return invert_cdf(u, t, w_logits)
def sample_intervals(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
domain=(-jnp.inf, jnp.inf),
):
"""Sample *intervals* (rather than points) from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of intervals to sample.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
domain: (minval, maxval), the range of valid values for `t`.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
if num_samples <= 1:
raise ValueError(f'num_samples must be > 1, is {num_samples}.')
# Sample a set of points from the step function.
centers = sample(
rng, t, w_logits, num_samples, single_jitter, deterministic_center=True
)
# The intervals we return will span the midpoints of each adjacent sample.
mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2
# Each first/last fencepost is the reflection of the first/last midpoint
# around the first/last sampled center.
first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1]
last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:]
samples = jnp.concatenate([first, mid, last], axis=-1)
# We clamp to the limits of the input domain, provided by the caller.
samples = jnp.clip(samples, *domain)
return samples
def lossfun_distortion(t, w):
"""Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
utils.assert_valid_stepfun(t, w)
# The loss incurred between all pairs of intervals.
ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :])
loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1)
# The loss incurred within each individual interval with itself.
loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3
return loss_inter + loss_intra
def weighted_percentile(t, w, ps):
"""Compute the weighted percentiles of a step function. w's must sum to 1."""
utils.assert_valid_stepfun(t, w)
cw = integrate_weights(w)
# We want to interpolate into the integrated weights according to `ps`.
wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
jnp.array(ps) / 100, cw, t
)
return wprctile
def resample(t, tp, vp, use_avg=False):
"""Resample a step function defined by (tp, vp) into intervals t.
Notation roughly matches jnp.interp. Resamples by summation by default.
Args:
t: tensor with shape (..., n+1), the endpoints to resample into.
tp: tensor with shape (..., m+1), the endpoints of the step function being
resampled.
vp: tensor with shape (..., m), the values of the step function being
resampled.
use_avg: bool, if False, return the sum of the step function for each
interval in `t`. If True, return the average, weighted by the width of
each interval in `t`.
Returns:
v: tensor with shape (..., n), the values of the resampled step function.
"""
utils.assert_valid_stepfun(tp, vp)
if use_avg:
wp = jnp.diff(tp)
v_numer = resample(t, tp, vp * wp, use_avg=False)
v_denom = resample(t, tp, wp, use_avg=False)
v = math.safe_div(v_numer, v_denom)
return v
acc = jnp.cumsum(vp, axis=-1)
acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1)
acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
t, tp, acc0
)
v = jnp.diff(acc0_resampled, axis=-1)
return v
def blur_and_resample_weights(tq, t, w, blur_halfwidth):
"""Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
utils.assert_valid_stepfun(t, w)
# Convert the histogram to a PDF.
p = weight_to_pdf(t, w)
# Blur the PDF step function into a piecewise linear spline PDF.
t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth)
# Integrate the spline PDF, then query it to get integrated weights.
quad = linspline.compute_integral(t_linspline, p_linspline)
acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad)
# Undo the integration to get weights.
wq = jnp.diff(acc_wq, axis=-1)
# Fix negative values to 0, as they should never happen but may due to
# numerical issues.
wq = jnp.maximum(0, wq)
return wq
<fim_middle>"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
""" | """Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/render.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/math.py
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
# camp_zipnerf/internal/spin_math.py
def generalized_bias_and_gain(x, slope,
threshold):
"""Maps the input according to the generalized bias and gain function.
References:
https://arxiv.org/abs/2010.09714
Args:
x: The inputs array with values in [0, 1] to map.
slope: The slope parameter of the curve which controls the slope of the
curve at the threshold.
threshold: The value at which `x` reverses its shape, and the point at which
the output is guaranteed to be equal to the input.
Returns:
The output of the curve at each input point `x`.
"""
eps = jnp.finfo(jnp.float32).tiny
left_curve = (threshold * x) / (x + slope * (threshold - x) + eps)
right_curve = ((1 - threshold) * (x - 1) / (1 - x - slope *
(threshold - x) + eps) + 1)
return jnp.where(x < threshold, left_curve, right_curve)
# camp_zipnerf/internal/coord.py
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
"""Construct a bijection between metric distances and normalized distances.
See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
detailed explanation.
Args:
fn: the function to ray distances.
t_near: a tensor of near-plane distances.
t_far: a tensor of far-plane distances.
fn_inv: Optional, if not None then it's used as the inverse of fn().
Returns:
t_to_s: a function that maps distances to normalized distances in [0, 1].
s_to_t: the inverse of t_to_s.
"""
if fn is None:
fn_fwd = lambda x: x
fn_inv = lambda x: x
else:
fn_fwd = fn
if fn_inv is None:
# A simple mapping from some functions to their inverse.
inv_mapping = {
'reciprocal': jnp.reciprocal,
'log': jnp.exp,
'exp': jnp.log,
'sqrt': jnp.square,
'square': jnp.sqrt,
}
fn_inv = inv_mapping[fn.__name__]
fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)]
# Forcibly clip t to the range of valid values, to guard against inf's.
t_clip = lambda t: jnp.clip(t, t_near, t_far)
t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near)
s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near))
return t_to_s, s_to_t
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for shooting and rendering rays."""
import jax
import jax.numpy as jnp
import jax.scipy as jsp
from internal import math
from internal import stepfun
def lift_gaussian(d, t_mean, t_var, r_var, diag):
"""Lift a Gaussian defined along a ray to 3D coordinates."""
mean = d[Ellipsis, None, :] * t_mean[Ellipsis, None]
d_mag_sq = jnp.maximum(1e-10, jnp.sum(d**2, axis=-1, keepdims=True))
if diag:
d_outer_diag = d**2
null_outer_diag = 1 - d_outer_diag / d_mag_sq
t_cov_diag = t_var[Ellipsis, None] * d_outer_diag[Ellipsis, None, :]
xy_cov_diag = r_var[Ellipsis, None] * null_outer_diag[Ellipsis, None, :]
cov_diag = t_cov_diag + xy_cov_diag
return mean, cov_diag
else:
d_outer = d[Ellipsis, :, None] * d[Ellipsis, None, :]
eye = jnp.eye(d.shape[-1])
null_outer = eye - d[Ellipsis, :, None] * (d / d_mag_sq)[Ellipsis, None, :]
t_cov = t_var[Ellipsis, None, None] * d_outer[Ellipsis, None, :, :]
xy_cov = r_var[Ellipsis, None, None] * null_outer[Ellipsis, None, :, :]
cov = t_cov + xy_cov
return mean, cov
def gaussianize_frustum(t0, t1):
<fim_suffix>
# A more stable version of Equation 7 from https://arxiv.org/abs/2103.13415.
s = t0 + t1
d = t1 - t0
eps = jnp.finfo(jnp.float32).eps ** 2
ratio = d**2 / jnp.maximum(eps, 3 * s**2 + d**2)
t_mean = s * (1 / 2 + ratio)
t_var = (1 / 12) * d**2 - (1 / 15) * ratio**2 * (12 * s**2 - d**2)
r_var = (1 / 16) * s**2 + d**2 * (5 / 48 - (1 / 15) * ratio)
return t_mean, t_var, r_var
def conical_frustum_to_gaussian(d, t0, t1, base_radius, diag):
"""Approximate a 3D conical frustum as a Gaussian distribution (mean+cov).
Assumes the ray is originating from the origin, and base_radius is the
radius at dist=1. Doesn't assume `d` is normalized.
Args:
d: jnp.float32 3-vector, the axis of the cone
t0: float, the starting distance of the frustum.
t1: float, the ending distance of the frustum.
base_radius: float, the scale of the radius as a function of distance.
diag: boolean, whether or not the Gaussian will be diagonal or full-covariance.
Returns:
a Gaussian (mean and covariance).
"""
t_mean, t_var, r_var = gaussianize_frustum(t0, t1)
r_var *= base_radius**2
mean, cov = lift_gaussian(d, t_mean, t_var, r_var, diag)
return mean, cov
def cylinder_to_gaussian(d, t0, t1, radius, diag):
"""Approximate a cylinder as a Gaussian distribution (mean+cov).
Assumes the ray is originating from the origin, and radius is the
radius. Does not renormalize `d`.
Args:
d: jnp.float32 3-vector, the axis of the cylinder
t0: float, the starting distance of the cylinder.
t1: float, the ending distance of the cylinder.
radius: float, the radius of the cylinder
diag: boolean, whether or not the Gaussian will be diagonal or full-covariance.
Returns:
a Gaussian (mean and covariance).
"""
t_mean = (t0 + t1) / 2
r_var = radius**2 / 4
t_var = (t1 - t0) ** 2 / 12
return lift_gaussian(d, t_mean, t_var, r_var, diag)
def cast_rays(tdist, origins, directions, radii, ray_shape, diag=True):
"""Cast rays (cone- or cylinder-shaped) and featurize sections of it.
Args:
tdist: float array, the "fencepost" distances along the ray.
origins: float array, the ray origin coordinates.
directions: float array, the ray direction vectors.
radii: float array, the radii (base radii for cones) of the rays.
ray_shape: string, the shape of the ray, must be 'cone' or 'cylinder'.
diag: boolean, whether or not the covariance matrices should be diagonal.
Returns:
a tuple of arrays of means and covariances.
"""
t0 = tdist[Ellipsis, :-1]
t1 = tdist[Ellipsis, 1:]
if ray_shape == 'cone':
gaussian_fn = conical_frustum_to_gaussian
elif ray_shape == 'cylinder':
gaussian_fn = cylinder_to_gaussian
else:
raise ValueError("ray_shape must be 'cone' or 'cylinder'")
means, covs = gaussian_fn(directions, t0, t1, radii, diag)
means = means + origins[Ellipsis, None, :]
return means, covs
def compute_alpha_weights_helper(density_delta):
"""Helper function for compute_alpha_weights."""
log_trans = -jnp.concatenate(
[
jnp.zeros_like(density_delta[Ellipsis, :1]),
jnp.cumsum(density_delta[Ellipsis, :-1], axis=-1),
],
axis=-1,
)
alpha = 1 - jnp.exp(-density_delta)
trans = jnp.exp(log_trans)
weights = alpha * trans
return weights
def compute_alpha_weights(
density,
tdist,
dirs,
**kwargs,
):
"""Helper function for computing alpha compositing weights."""
t_delta = jnp.diff(tdist)
delta = t_delta * jnp.linalg.norm(dirs[Ellipsis, None, :], axis=-1)
density_delta = density * delta
return compute_alpha_weights_helper(density_delta, **kwargs)
def volumetric_rendering(
rgbs,
weights,
tdist,
bg_rgbs,
compute_extras,
extras=None,
percentiles = (5, 50, 95),
):
"""Volumetric Rendering Function.
Args:
rgbs: jnp.ndarray(float32), color, [batch_size, num_samples, 3]
weights: jnp.ndarray(float32), weights, [batch_size, num_samples].
tdist: jnp.ndarray(float32), [batch_size, num_samples].
bg_rgbs: jnp.ndarray(float32), the color(s) to use for the background.
compute_extras: bool, if True, compute extra quantities besides color.
extras: dict, a set of values along rays to render by alpha compositing.
percentiles: depth will be returned for these percentiles.
Returns:
rendering: a dict containing an rgb image of size [batch_size, 3], and other
visualizations if compute_extras=True.
"""
eps = jnp.finfo(jnp.float32).eps
rendering = {}
acc = weights.sum(axis=-1)
bg_w = jnp.maximum(0, 1 - acc[Ellipsis, None]) # The weight of the background.
if rgbs is not None:
rgb = (weights[Ellipsis, None] * rgbs).sum(axis=-2) + bg_w * bg_rgbs
else:
rgb = None
rendering['rgb'] = rgb
if compute_extras:
rendering['acc'] = acc
if extras is not None:
for k, v in extras.items():
if v is not None:
rendering[k] = (weights[Ellipsis, None] * v).sum(axis=-2)
expectation = lambda x: (weights * x).sum(axis=-1) / jnp.maximum(eps, acc)
t_mids = 0.5 * (tdist[Ellipsis, :-1] + tdist[Ellipsis, 1:])
# For numerical stability this expectation is computed using log-distance.
rendering['distance_mean'] = jnp.clip(
jnp.nan_to_num(jnp.exp(expectation(jnp.log(t_mids))), jnp.inf),
tdist[Ellipsis, 0],
tdist[Ellipsis, -1],
)
# Normalize the weights to sum to 1.
weights_norm = weights / jnp.maximum(eps, acc[Ellipsis, None])
distance_percentiles = stepfun.weighted_percentile(
tdist, weights_norm, percentiles
)
for i, p in enumerate(percentiles):
s = 'median' if p == 50 else 'percentile_' + str(p)
rendering['distance_' + s] = distance_percentiles[Ellipsis, i]
return rendering
<fim_middle>"""Convert intervals along a conical frustum into means and variances.""" | """Convert intervals along a conical frustum into means and variances.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/train_utils.py
def summarize_tree(fn, tree, ancestry=(), max_depth=3):
"""Flatten 'tree' while 'fn'-ing values and formatting keys like/this."""
stats = {}
for k, v in tree.items():
name = ancestry + (k,)
stats['/'.join(name)] = fn(v)
if hasattr(v, 'items') and len(ancestry) < (max_depth - 1):
stats.update(summarize_tree(fn, v, ancestry=name, max_depth=max_depth))
return stats
# camp_zipnerf/internal/datasets.py
def generate_flattened_ray_batch(
self, cam_idx, n_samples=10000
) -> utils.Batch:
"""Generate flattened ray batch for a specified camera in the dataset."""
images_flattened, indices_flattened = flatten_data(
self.images[cam_idx][None]
)
n_pixels = images_flattened.shape[0]
mask_indices = np.random.randint(0, n_pixels, (n_samples,))
cam_idx = indices_flattened[..., 0][mask_indices]
pix_x_int = indices_flattened[..., 1][mask_indices]
pix_y_int = indices_flattened[..., 2][mask_indices]
rgb = images_flattened[mask_indices]
return self._make_ray_batch(
pix_x_int, pix_y_int, cam_idx, lossmult=None, rgb=rgb
)
# camp_zipnerf/internal/datasets.py
def _split_indices_with_spline_keyframes(
self,
config: configs.Config,
all_indices: np.ndarray,
test_indices: np.ndarray,
all_image_names: List[str],
) -> Tuple[np.ndarray, np.ndarray]:
"""Constructs train, test split indices when spline keyframes are present.
When using keyframe-based spline paths, we want to avoid training on
keyframes for two reasons: to use them for validation and to minimize the
number of blurred pixels used in training (spline keyframes may be
blurred). We add spline keyframes to the test split here.
Args:
config: Config object.
all_indices: indices of all images available for train and test.
test_indices: indices of additional test images.
all_image_names: filenames for all images.
Returns:
train_indices: image indices to use in the train split.
test_indices: image indices to use in the test split.
"""
def _sorted_union(subsets):
result = set()
for subset in subsets:
result = result.union(subset)
return list(sorted(result))
def _sorted_complement(superset, subset):
return list(sorted(set(superset) - set(subset)))
# Identify all sources for keyframes.
spline_keyframe_sources = []
if config.render_spline_keyframes:
print(
'Adding images from config.render_spline_keyframes to test '
f'split: {config.render_spline_keyframes}'
)
spline_keyframe_sources.append(config.render_spline_keyframes)
if config.render_spline_keyframes_choices:
print(
'Adding images from config.render_spline_keyframes_choices '
f'to test split: {config.render_spline_keyframes_choices}'
)
spline_keyframe_sources.extend(
config.render_spline_keyframes_choices.split(',')
)
spline_keyframe_indices = _sorted_union([
camera_utils.identify_file_indices(source, all_image_names)
for source in spline_keyframe_sources
])
test_indices = _sorted_union([test_indices, spline_keyframe_indices])
train_indices = _sorted_complement(all_indices, test_indices)
return np.array(train_indices), np.array(test_indices)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
"""Clamps `x` from below to be positive."""
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
@jax.custom_jvp
def safe_fn(x):
<fim_suffix>
return fn(jnp.clip(x, *x_range))
@safe_fn.defjvp
def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
lr: the learning rate for current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1))
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
alpha=0: Cauchy/Lorentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
# A "safe" versions of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
# The loss when not in one of the special cases.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
<fim_middle>"""fn() with clipped inputs.""" | """fn() with clipped inputs.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/linspline.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/stepfun.py
def blur_and_resample_weights(tq, t, w, blur_halfwidth):
"""Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
utils.assert_valid_stepfun(t, w)
# Convert the histogram to a PDF.
p = weight_to_pdf(t, w)
# Blur the PDF step function into a piecewise linear spline PDF.
t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth)
# Integrate the spline PDF, then query it to get integrated weights.
quad = linspline.compute_integral(t_linspline, p_linspline)
acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad)
# Undo the integration to get weights.
wq = jnp.diff(acc_wq, axis=-1)
# Fix negative values to 0, as they should never happen but may due to
# numerical issues.
wq = jnp.maximum(0, wq)
return wq
# camp_zipnerf/internal/utils.py
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
# camp_zipnerf/internal/render.py
def compute_alpha_weights_helper(density_delta):
"""Helper function for compute_alpha_weights."""
log_trans = -jnp.concatenate(
[
jnp.zeros_like(density_delta[Ellipsis, :1]),
jnp.cumsum(density_delta[Ellipsis, :-1], axis=-1),
],
axis=-1,
)
alpha = 1 - jnp.exp(-density_delta)
trans = jnp.exp(log_trans)
weights = alpha * trans
return weights
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for linear splines."""
import functools
from internal import math
from internal import utils
import jax
from jax.experimental import checkify
import jax.numpy as jnp
def check_zero_endpoints(y):
checkify.check(jnp.all(y[Ellipsis, 0] == 0), 'Splines must all start with 0.')
checkify.check(jnp.all(y[Ellipsis, -1] == 0), 'Splines must all end with 0.')
def query(tq, t, v):
"""Query linear spline (t, v) at tq."""
utils.assert_valid_linspline(t, v)
interp = functools.partial(jnp.interp, left=0, right=0)
return jnp.vectorize(interp, signature='(n),(m),(m)->(n)')(tq, t, v)
def integrate(t, w):
"""Integrate (t, w) according to the trapezoid rule."""
utils.assert_valid_linspline(t, w)
return 0.5 * jnp.sum((w[Ellipsis, :-1] + w[Ellipsis, 1:]) * jnp.diff(t), axis=-1)
def normalize(t, w, eps=jnp.finfo(jnp.float32).eps ** 2):
"""Make w integrate to 1."""
utils.assert_valid_linspline(t, w)
return w / jnp.maximum(eps, integrate(t, w))[Ellipsis, None]
def insert_knot(ti, t, y):
"""Inserts knots ti into the linear spline (t, w). Assumes zero endpoints."""
utils.assert_valid_linspline(t, y)
check_zero_endpoints(y)
# Compute the spline value at the insertion points.
yi = query(ti, t, y)
# Concatenate the insertion points and values onto the end of each spline.
ti_ex = jnp.broadcast_to(ti, t.shape[: -len(ti.shape)] + ti.shape)
yi_ex = jnp.broadcast_to(yi, y.shape[: -len(yi.shape)] + yi.shape)
to = jnp.concatenate([t, ti_ex], axis=-1)
yo = jnp.concatenate([y, yi_ex], axis=-1)
# Sort the spline according to t.
sort_idx = jnp.argsort(to)
to = jnp.take_along_axis(to, sort_idx, axis=-1)
yo = jnp.take_along_axis(yo, sort_idx, axis=-1)
return to, yo
def clamp(t, y, minval, maxval):
"""Clamp (t, y) to be zero outside of t in [minval, maxval]."""
utils.assert_valid_linspline(t, y)
check_zero_endpoints(y)
# Add in extra points at and immediately above/below the min/max vals.
ti = jnp.concatenate(
[
math.minus_eps(minval),
minval,
maxval,
math.plus_eps(maxval),
],
axis=-1,
)
tc, yo = insert_knot(ti, t, y)
# Zero the spline values outside of [minval, maxval].
yc = jnp.where(tc > maxval, 0, jnp.where(tc < minval, 0, yo))
return tc, yc
def compute_integral(t, y):
<fim_suffix>
utils.assert_valid_linspline(t, y)
eps = jnp.finfo(jnp.float32).eps ** 2
dt = jnp.diff(t)
a = jnp.diff(y) / jnp.maximum(eps, 2 * dt)
b = y[Ellipsis, :-1]
# The integral has an ambiguous global offset here, which we set to 0.
c1 = 0.5 * jnp.cumsum(dt[Ellipsis, :-1] * (y[Ellipsis, :-2] + y[Ellipsis, 1:-1]), axis=-1)
c = jnp.concatenate([jnp.zeros_like(y[Ellipsis, :1]), c1], axis=-1)
# This quadratic is parameterized as:
# (t - t[i])**2 * a[i] + (t - t[i]) * b[i] + c[i]
return a, b, c
def sorted_lookup(x, xp):
"""Lookup `x` at sorted locations `xp`."""
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
functools.partial(jnp.searchsorted, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx0 = jnp.maximum(idx - 1, 0)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
return idx0, idx1
def interpolate_integral(tq, t, a, b, c):
"""Interpolate into the piecewise quadratic returned by compute_integral()."""
utils.assert_valid_stepfun(t, a)
utils.assert_valid_stepfun(t, b)
utils.assert_valid_stepfun(t, c)
# Clip to valid inputs (assumes repeating boundaries).
tq = jnp.clip(tq, t[Ellipsis, :1], math.minus_eps(t[Ellipsis, -1:]))
# Lookup the quadratic coefficients corresponding to each input query.
idx0, _ = sorted_lookup(tq, t)
# TODO(barron): It might be faster to stack (a, c, b) during generation and
# do a single gather.
t0 = jnp.take_along_axis(t, idx0, axis=-1)
a0 = jnp.take_along_axis(a, idx0, axis=-1)
b0 = jnp.take_along_axis(b, idx0, axis=-1)
c0 = jnp.take_along_axis(c, idx0, axis=-1)
td = tq - t0
v = a0 * td**2 + b0 * td + c0
return v
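# Illustrative sketch, not part of the original module: integrating a simple
# triangle-shaped linear spline and querying the resulting piecewise quadratic,
# mirroring how stepfun.blur_and_resample_weights() combines these functions.
# The knot values below are arbitrary example data.
def _example_integral_query():
  t = jnp.array([0.0, 1.0, 2.0])
  y = jnp.array([0.0, 1.0, 0.0])
  a, b, c = compute_integral(t, y)
  tq = jnp.array([0.5, 1.0, 1.5])
  return interpolate_integral(tq, t, a, b, c)  # ~[0.125, 0.5, 0.875].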
def blur_stepfun(ts, ys, halfwidth):
"""Convolve a step function (ts, ys) with a box filter of size `halfwidth`."""
utils.assert_valid_stepfun(ts, ys)
# Blur each entire step function by a single `halfwidth` value.
# Dilate the t-values by at least numerical epsilon in each direction.
ts_lo = ts - halfwidth
ts_hi = jnp.maximum(math.plus_eps(ts), ts + halfwidth)
# The difference in adjacent `y` values (zero padded) divided by the
# difference in adjacent `t` values.
ys0 = jnp.concatenate(
[jnp.zeros_like(ys[Ellipsis, :1]), ys, jnp.zeros_like(ys[Ellipsis, :1])], axis=-1
)
dy = jnp.diff(ys0) / (ts_hi - ts_lo)
# When decreasing t splat a positive second derivative, and when increasing
# t splat a negative second derivative.
tp = jnp.concatenate([ts_lo, ts_hi], axis=-1)
dyp = jnp.concatenate([dy, -dy], axis=-1)
# Sort the dilated t-values and their accompanying derivative weights.
idx = jnp.argsort(tp, axis=-1)
tp = jnp.take_along_axis(tp, idx, axis=-1)
dyp = jnp.take_along_axis(dyp, idx[Ellipsis, :-2], axis=-1)
# A ramp is the double integral of a delta function, so if we double-
  # integrate these derivatives we get the sum of a bunch of trapezoids.
yp = jnp.cumsum(jnp.diff(tp)[Ellipsis, :-1] * jnp.cumsum(dyp, axis=-1), axis=-1)
# Add in the missing first and last endpoint values, which must be zero
# because we assume zero padding on `ys`.
yp = jnp.concatenate(
[jnp.zeros_like(yp[Ellipsis, :1]), yp, jnp.zeros_like(yp[Ellipsis, -1:])], axis=-1
)
return tp, yp
<fim_middle>"""Integrate a linear spline into a piecewise quadratic spline.""" | """Integrate a linear spline into a piecewise quadratic spline.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/camera_utils.py
def cast_spherical_rays(
camtoworld,
height,
width,
near,
far,
xnp,
):
"""Generates a spherical camera ray batch."""
return cast_general_rays(
camtoworld,
xnp.diag(xnp.array([2.0 * np.pi / width, np.pi / height, 1.0])),
height,
width,
near,
far,
camtype=ProjectionType.PANORAMIC,
xnp=xnp,
)
# camp_zipnerf/internal/math.py
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
# camp_zipnerf/internal/loss_utils.py
def eikonal_equation(n, eps=jnp.finfo(jnp.float32).tiny):
"""Compute eikonal equation on normals, checking how close norm is to 1."""
norm = jnp.sqrt(jnp.maximum(jnp.sum(n**2, axis=-1), eps))
return jnp.mean((norm - 1.0) ** 2.0)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reflection directions and directional encodings."""
import math
from internal import math as math_lib
import jax.numpy as jnp
import numpy as np
def reflect(viewdirs, normals):
"""Reflect view directions about normals.
The reflection of a vector v about a unit vector n is a vector u such that
dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two
equations is u = 2 dot(n, v) n - v.
Args:
viewdirs: [..., 3] array of view directions.
normals: [..., 3] array of normal directions (assumed to be unit vectors).
Returns:
[..., 3] array of reflection directions.
"""
return (
2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals
- viewdirs
)
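# Illustrative sketch (arbitrary vectors, not from the original file): reflecting
# a unit view direction about the +z normal negates its x and y components.
def _example_reflect():
  viewdirs = jnp.array([[0.0, 0.6, 0.8]])
  normals = jnp.array([[0.0, 0.0, 1.0]])
  return reflect(viewdirs, normals)  # [[0.0, -0.6, 0.8]].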
def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps):
"""Normalize x to unit length along last axis.
Normalizing vectors is surprisingly tricky, because you have to address the
case where the denominator in the normalization is tiny or zero, in which case
gradients will explode. For this reason, we perform two normalizations: in the
forward pass, we clamp the denominator with ~1e-40, but in the backward pass
we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the
output of this function is unit norm (unless x is very very small) while
preventing exploding gradients.
Args:
x: The array of values to normalize.
grad_eps: The value to clip the squared norm by before division in the
backward pass.
Returns:
A normalized array x / ||x||, normalized along the last axis.
"""
tiny = jnp.finfo(jnp.float32).tiny
grad_eps = jnp.maximum(tiny, grad_eps)
denom_sq = jnp.sum(x**2, axis=-1, keepdims=True)
normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq))
normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq))
# Use `normal_val` in the forward pass but `normal_grad` in the backward pass.
normal = math_lib.override_gradient(normal_val, normal_grad)
return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal)
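# Illustrative sketch (arbitrary example values, not from the original file):
# l2_normalize() on a small batch, including an all-zero row, which maps to the
# zero vector instead of NaN thanks to the clamped denominator described above.
def _example_l2_normalize():
  x = jnp.array([[3.0, 4.0, 0.0], [0.0, 0.0, 0.0]])
  return l2_normalize(x)  # ~[[0.6, 0.8, 0.0], [0.0, 0.0, 0.0]].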
def compute_weighted_mae(weights, normals, normals_gt):
"""Compute weighted mean angular error, assuming normals are unit length."""
angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1))
return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum())
def generalized_binomial_coeff(a, k):
"""Compute generalized binomial coefficients."""
return np.prod(a - np.arange(k)) / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
"""Compute associated Legendre polynomial coefficients.
Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
(l, m)th associated Legendre polynomial, P_l^m(cos(theta)).
Args:
l: associated Legendre polynomial degree.
m: associated Legendre polynomial order.
k: power of cos(theta).
Returns:
A float, the coefficient of the term corresponding to the inputs.
"""
return (
(-1) ** m
* 2**l
* math.factorial(l)
/ math.factorial(k)
/ math.factorial(l - k - m)
* generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l)
)
def sph_harm_coeff(l, m, k):
<fim_suffix>
return np.sqrt(
(2.0 * l + 1.0)
* math.factorial(l - m)
/ (4.0 * np.pi * math.factorial(l + m))
) * assoc_legendre_coeff(l, m, k)
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l))
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
def generate_ide_fn(deg_view):
"""Generate integrated directional encoding (IDE) function.
This function returns a function that computes the integrated directional
encoding from Equations 6-8 of arxiv.org/abs/2112.03907.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating integrated directional encoding.
Raises:
ValueError: if deg_view is larger than 5.
"""
if deg_view > 5:
raise ValueError('Only deg_view of at most 5 is numerically stable.')
ml_array = get_ml_array(deg_view)
l_max = 2 ** (deg_view - 1)
# Create a matrix corresponding to ml_array holding all coefficients, which,
# when multiplied (from the right) by the z coordinate Vandermonde matrix,
# results in the z component of the encoding.
mat = np.zeros((l_max + 1, ml_array.shape[1]))
for i, (m, l) in enumerate(ml_array.T):
for k in range(l - m + 1):
mat[k, i] = sph_harm_coeff(l, m, k)
def integrated_dir_enc_fn(xyz, kappa_inv):
"""Function returning integrated directional encoding (IDE).
Args:
xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
Mises-Fisher distribution.
Returns:
An array with the resulting IDE.
"""
x = xyz[Ellipsis, 0:1]
y = xyz[Ellipsis, 1:2]
z = xyz[Ellipsis, 2:3]
# Compute z Vandermonde matrix.
vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1)
# Compute x+iy Vandermonde matrix.
vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1)
# Get spherical harmonics.
sph_harms = vmxy * math_lib.matmul(vmz, mat)
# Apply attenuation function using the von Mises-Fisher distribution
# concentration parameter, kappa.
sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
ide = sph_harms * jnp.exp(-sigma * kappa_inv)
# Split into real and imaginary parts and return
return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1)
return integrated_dir_enc_fn
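# Illustrative usage sketch (hypothetical values, not from the original file):
# build the IDE function once, then query it per sample with a view direction
# and the reciprocal vMF concentration produced by the model.
def _example_integrated_dir_enc():
  ide_fn = generate_ide_fn(deg_view=2)
  xyz = jnp.array([[0.0, 0.0, 1.0]])  # A unit view direction.
  kappa_inv = jnp.array([[0.1]])      # Reciprocal concentration parameter.
  return ide_fn(xyz, kappa_inv)       # Shape (1, 10) for deg_view=2.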
def generate_dir_enc_fn(deg_view):
"""Generate directional encoding (DE) function.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating directional encoding.
"""
integrated_dir_enc_fn = generate_ide_fn(deg_view)
def dir_enc_fn(xyz):
"""Function returning directional encoding (DE)."""
return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1]))
return dir_enc_fn
def orientation_loss(w, n, v):
"""Orientation loss on weights `w`, normals `n`, and -view directions `v`."""
n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1)
return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1))
<fim_middle>"""Compute spherical harmonic coefficients.""" | """Compute spherical harmonic coefficients.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/train_utils.py
def summarize_tree(fn, tree, ancestry=(), max_depth=3):
"""Flatten 'tree' while 'fn'-ing values and formatting keys like/this."""
stats = {}
for k, v in tree.items():
name = ancestry + (k,)
stats['/'.join(name)] = fn(v)
if hasattr(v, 'items') and len(ancestry) < (max_depth - 1):
stats.update(summarize_tree(fn, v, ancestry=name, max_depth=max_depth))
return stats
# camp_zipnerf/internal/datasets.py
def generate_flattened_ray_batch(
self, cam_idx, n_samples=10000
) -> utils.Batch:
"""Generate flattened ray batch for a specified camera in the dataset."""
images_flattened, indices_flattened = flatten_data(
self.images[cam_idx][None]
)
n_pixels = images_flattened.shape[0]
mask_indices = np.random.randint(0, n_pixels, (n_samples,))
cam_idx = indices_flattened[..., 0][mask_indices]
pix_x_int = indices_flattened[..., 1][mask_indices]
pix_y_int = indices_flattened[..., 2][mask_indices]
rgb = images_flattened[mask_indices]
return self._make_ray_batch(
pix_x_int, pix_y_int, cam_idx, lossmult=None, rgb=rgb
)
# camp_zipnerf/internal/datasets.py
def _split_indices_with_spline_keyframes(
self,
config: configs.Config,
all_indices: np.ndarray,
test_indices: np.ndarray,
all_image_names: List[str],
) -> Tuple[np.ndarray, np.ndarray]:
"""Constructs train, test split indices when spline keyframes are present.
When using keyframe-based spline paths, we want to avoid training on
keyframes for two reasons: to use them for validation and to minimize the
number of blurred pixels used in training (spline keyframes may be
    blurred). We add spline keyframes to the test split here.
Args:
config: Config object.
all_indices: indices of all images available for train and test.
test_indices: indices of additional test images.
all_image_names: filenames for all images.
Returns:
train_indices: image indices to use in the train split.
test_indices: image indices to use in the test split.
"""
def _sorted_union(subsets):
result = set()
for subset in subsets:
result = result.union(subset)
return list(sorted(result))
def _sorted_complement(superset, subset):
return list(sorted(set(superset) - set(subset)))
# Identify all sources for keyframes.
spline_keyframe_sources = []
if config.render_spline_keyframes:
print(
'Adding images from config.render_spline_keyframes to test '
f'split: {config.render_spline_keyframes}'
)
spline_keyframe_sources.append(config.render_spline_keyframes)
if config.render_spline_keyframes_choices:
print(
'Adding images from config.render_spline_keyframes_choices '
f'to test split: {config.render_spline_keyframes_choices}'
)
spline_keyframe_sources.extend(
config.render_spline_keyframes_choices.split(',')
)
spline_keyframe_indices = _sorted_union([
camera_utils.identify_file_indices(source, all_image_names)
for source in spline_keyframe_sources
])
test_indices = _sorted_union([test_indices, spline_keyframe_indices])
train_indices = _sorted_complement(all_indices, test_indices)
return np.array(train_indices), np.array(test_indices)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
"""Clamps `x` from below to be positive."""
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
@jax.custom_jvp
def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range))
@safe_fn.defjvp
def safe_fn_jvp(primals, tangents):
<fim_suffix>
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
lr: the learning for current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
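# Illustrative sketch with arbitrary hyperparameters (not from the original
# source): the rate is eased in over the first lr_delay_steps steps and then
# decays log-linearly from lr_init to lr_final.
def _example_learning_rate_decay():
  return [
      learning_rate_decay(step, lr_init=1e-2, lr_final=1e-4, max_steps=1000,
                          lr_delay_steps=100, lr_delay_mult=0.1)
      for step in (0, 100, 1000)
  ]  # Roughly [1e-3, 6.3e-3, 1e-4].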
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1))
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
      alpha=0: Cauchy/Lorentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
# A "safe" versions of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
  # The loss when not in one of the special cases.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
<fim_middle>"""Backpropagate using the gradient and clipped inputs.""" | """Backpropagate using the gradient and clipped inputs.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/render.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/ref_utils.py
def reflect(viewdirs, normals):
"""Reflect view directions about normals.
The reflection of a vector v about a unit vector n is a vector u such that
dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two
equations is u = 2 dot(n, v) n - v.
Args:
viewdirs: [..., 3] array of view directions.
normals: [..., 3] array of normal directions (assumed to be unit vectors).
Returns:
[..., 3] array of reflection directions.
"""
return (
2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals
- viewdirs
)
# camp_zipnerf/internal/geometry.py
def ray_sphere_intersection(origin,
direction,
radius = 1.0):
"""Computes the intersecting point between a ray and a sphere.
Variables use notation from Wikipedia:
u: direction of ray
o: origin of ray
References:
https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection
Args:
origin: The origin of the ray.
direction: The direction of the ray.
radius: The radius of the sphere.
Returns:
The intersecting point on the sphere.
"""
u_dot_o = jnp.sum(direction * origin, axis=-1, keepdims=True)
nabla = u_dot_o**2 - (jnp.linalg.norm(origin, keepdims=True)**2 - radius**2)
    # Since this is a ray and not a line, we only need to consider the case where
# nabla is positive.
distance = -u_dot_o + jnp.sqrt(nabla)
return origin + distance * direction
# camp_zipnerf/internal/rigid_body.py
def rts_to_sim3(
rotation, translation, scale
):
"""Converts a rotation, translation and scale to a homogeneous transform.
Args:
rotation: (3, 3) An orthonormal rotation matrix.
translation: (3,) A 3-vector representing a translation.
scale: A scalar factor.
Returns:
(4, 4) A homogeneous transformation matrix.
"""
transform = jnp.eye(4)
transform = transform.at[:3, :3].set(rotation * scale)
transform = transform.at[:3, 3].set(translation)
return transform
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for shooting and rendering rays."""
import jax
import jax.numpy as jnp
import jax.scipy as jsp
from internal import math
from internal import stepfun
def lift_gaussian(d, t_mean, t_var, r_var, diag):
"""Lift a Gaussian defined along a ray to 3D coordinates."""
mean = d[Ellipsis, None, :] * t_mean[Ellipsis, None]
d_mag_sq = jnp.maximum(1e-10, jnp.sum(d**2, axis=-1, keepdims=True))
if diag:
d_outer_diag = d**2
null_outer_diag = 1 - d_outer_diag / d_mag_sq
t_cov_diag = t_var[Ellipsis, None] * d_outer_diag[Ellipsis, None, :]
xy_cov_diag = r_var[Ellipsis, None] * null_outer_diag[Ellipsis, None, :]
cov_diag = t_cov_diag + xy_cov_diag
return mean, cov_diag
else:
d_outer = d[Ellipsis, :, None] * d[Ellipsis, None, :]
eye = jnp.eye(d.shape[-1])
null_outer = eye - d[Ellipsis, :, None] * (d / d_mag_sq)[Ellipsis, None, :]
t_cov = t_var[Ellipsis, None, None] * d_outer[Ellipsis, None, :, :]
xy_cov = r_var[Ellipsis, None, None] * null_outer[Ellipsis, None, :, :]
cov = t_cov + xy_cov
return mean, cov
def gaussianize_frustum(t0, t1):
"""Convert intervals along a conical frustum into means and variances."""
# A more stable version of Equation 7 from https://arxiv.org/abs/2103.13415.
s = t0 + t1
d = t1 - t0
eps = jnp.finfo(jnp.float32).eps ** 2
ratio = d**2 / jnp.maximum(eps, 3 * s**2 + d**2)
t_mean = s * (1 / 2 + ratio)
t_var = (1 / 12) * d**2 - (1 / 15) * ratio**2 * (12 * s**2 - d**2)
r_var = (1 / 16) * s**2 + d**2 * (5 / 48 - (1 / 15) * ratio)
return t_mean, t_var, r_var
def conical_frustum_to_gaussian(d, t0, t1, base_radius, diag):
"""Approximate a 3D conical frustum as a Gaussian distribution (mean+cov).
Assumes the ray is originating from the origin, and base_radius is the
radius at dist=1. Doesn't assume `d` is normalized.
Args:
d: jnp.float32 3-vector, the axis of the cone
t0: float, the starting distance of the frustum.
t1: float, the ending distance of the frustum.
base_radius: float, the scale of the radius as a function of distance.
    diag: boolean, whether or not the Gaussian will be diagonal or full-covariance.
Returns:
a Gaussian (mean and covariance).
"""
t_mean, t_var, r_var = gaussianize_frustum(t0, t1)
r_var *= base_radius**2
mean, cov = lift_gaussian(d, t_mean, t_var, r_var, diag)
return mean, cov
def cylinder_to_gaussian(d, t0, t1, radius, diag):
<fim_suffix>
t_mean = (t0 + t1) / 2
r_var = radius**2 / 4
t_var = (t1 - t0) ** 2 / 12
return lift_gaussian(d, t_mean, t_var, r_var, diag)
def cast_rays(tdist, origins, directions, radii, ray_shape, diag=True):
"""Cast rays (cone- or cylinder-shaped) and featurize sections of it.
Args:
tdist: float array, the "fencepost" distances along the ray.
origins: float array, the ray origin coordinates.
directions: float array, the ray direction vectors.
radii: float array, the radii (base radii for cones) of the rays.
ray_shape: string, the shape of the ray, must be 'cone' or 'cylinder'.
diag: boolean, whether or not the covariance matrices should be diagonal.
Returns:
a tuple of arrays of means and covariances.
"""
t0 = tdist[Ellipsis, :-1]
t1 = tdist[Ellipsis, 1:]
if ray_shape == 'cone':
gaussian_fn = conical_frustum_to_gaussian
elif ray_shape == 'cylinder':
gaussian_fn = cylinder_to_gaussian
else:
raise ValueError("ray_shape must be 'cone' or 'cylinder'")
means, covs = gaussian_fn(directions, t0, t1, radii, diag)
means = means + origins[Ellipsis, None, :]
return means, covs
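# Illustrative sketch (arbitrary example values, not from the original module):
# casting one cone-shaped ray with two t-intervals into per-interval Gaussians.
def _example_cast_rays():
  tdist = jnp.array([[1.0, 2.0, 3.0]])       # Three fenceposts -> two intervals.
  origins = jnp.zeros((1, 3))
  directions = jnp.array([[0.0, 0.0, 1.0]])
  radii = jnp.array([[0.01]])
  means, covs = cast_rays(tdist, origins, directions, radii, ray_shape='cone')
  return means, covs                          # Both shaped (1, 2, 3) when diag=True.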
def compute_alpha_weights_helper(density_delta):
"""Helper function for compute_alpha_weights."""
log_trans = -jnp.concatenate(
[
jnp.zeros_like(density_delta[Ellipsis, :1]),
jnp.cumsum(density_delta[Ellipsis, :-1], axis=-1),
],
axis=-1,
)
alpha = 1 - jnp.exp(-density_delta)
trans = jnp.exp(log_trans)
weights = alpha * trans
return weights
def compute_alpha_weights(
density,
tdist,
dirs,
**kwargs,
):
"""Helper function for computing alpha compositing weights."""
t_delta = jnp.diff(tdist)
delta = t_delta * jnp.linalg.norm(dirs[Ellipsis, None, :], axis=-1)
density_delta = density * delta
return compute_alpha_weights_helper(density_delta, **kwargs)
def volumetric_rendering(
rgbs,
weights,
tdist,
bg_rgbs,
compute_extras,
extras=None,
percentiles = (5, 50, 95),
):
"""Volumetric Rendering Function.
Args:
rgbs: jnp.ndarray(float32), color, [batch_size, num_samples, 3]
weights: jnp.ndarray(float32), weights, [batch_size, num_samples].
tdist: jnp.ndarray(float32), [batch_size, num_samples].
bg_rgbs: jnp.ndarray(float32), the color(s) to use for the background.
compute_extras: bool, if True, compute extra quantities besides color.
extras: dict, a set of values along rays to render by alpha compositing.
percentiles: depth will be returned for these percentiles.
Returns:
rendering: a dict containing an rgb image of size [batch_size, 3], and other
visualizations if compute_extras=True.
"""
eps = jnp.finfo(jnp.float32).eps
rendering = {}
acc = weights.sum(axis=-1)
bg_w = jnp.maximum(0, 1 - acc[Ellipsis, None]) # The weight of the background.
if rgbs is not None:
rgb = (weights[Ellipsis, None] * rgbs).sum(axis=-2) + bg_w * bg_rgbs
else:
rgb = None
rendering['rgb'] = rgb
if compute_extras:
rendering['acc'] = acc
if extras is not None:
for k, v in extras.items():
if v is not None:
rendering[k] = (weights[Ellipsis, None] * v).sum(axis=-2)
expectation = lambda x: (weights * x).sum(axis=-1) / jnp.maximum(eps, acc)
t_mids = 0.5 * (tdist[Ellipsis, :-1] + tdist[Ellipsis, 1:])
# For numerical stability this expectation is computing using log-distance.
rendering['distance_mean'] = jnp.clip(
jnp.nan_to_num(jnp.exp(expectation(jnp.log(t_mids))), jnp.inf),
tdist[Ellipsis, 0],
tdist[Ellipsis, -1],
)
# Normalize the weights to sum to 1.
weights_norm = weights / jnp.maximum(eps, acc[Ellipsis, None])
distance_percentiles = stepfun.weighted_percentile(
tdist, weights_norm, percentiles
)
for i, p in enumerate(percentiles):
s = 'median' if p == 50 else 'percentile_' + str(p)
rendering['distance_' + s] = distance_percentiles[Ellipsis, i]
return rendering
<fim_middle>"""Approximate a cylinder as a Gaussian distribution (mean+cov).
Assumes the ray is originating from the origin, and radius is the
radius. Does not renormalize `d`.
Args:
d: jnp.float32 3-vector, the axis of the cylinder
t0: float, the starting distance of the cylinder.
t1: float, the ending distance of the cylinder.
radius: float, the radius of the cylinder
diag: boolean, whether or the Gaussian will be diagonal or full-covariance.
Returns:
a Gaussian (mean and covariance).
""" | """Approximate a cylinder as a Gaussian distribution (mean+cov).
Assumes the ray is originating from the origin, and radius is the
radius. Does not renormalize `d`.
Args:
d: jnp.float32 3-vector, the axis of the cylinder
t0: float, the starting distance of the cylinder.
t1: float, the ending distance of the cylinder.
radius: float, the radius of the cylinder
diag: boolean, whether or the Gaussian will be diagonal or full-covariance.
Returns:
a Gaussian (mean and covariance).
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/coord.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/math.py
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
# camp_zipnerf/internal/image_utils.py
def psnr_to_mse(psnr):
"""Compute MSE given a PSNR (we assume the maximum pixel value is 1)."""
return jnp.exp(-0.1 * jnp.log(10.0) * psnr)
# camp_zipnerf/internal/quaternion.py
def conjugate(q):
"""Compute the conjugate of a quaternion."""
return jnp.concatenate([-im(q), re(q)], axis=-1)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
"""Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
# Clamping to 1 produces correct scale inside |x| < 1
x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq
z = scale * x
return z
def inv_contract(z):
"""The inverse of contract()."""
# Clamping to 1 produces correct scale inside |z| < 1
z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq
x = z / inv_scale
return x
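# Illustrative sketch (arbitrary points, not from the original file): contract()
# leaves points with |x| < 1 unchanged, pulls distant points inside radius 2,
# and inv_contract() inverts the mapping.
def _example_contract_roundtrip():
  x = jnp.array([[0.5, 0.0, 0.0], [10.0, 0.0, 0.0]])
  z = contract(x)         # Second row becomes [1.9, 0, 0], inside the radius-2 ball.
  return inv_contract(z)  # Recovers x up to float precision.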
def track_linearize(fn, mean, cov):
"""Apply function `fn` to a set of means and covariances, ala a Kalman filter.
We can analytically transform a Gaussian parameterized by `mean` and `cov`
with a function `fn` by linearizing `fn` around `mean`, and taking advantage
of the fact that Covar[Ax + y] = A(Covar[x])A^T (see
https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
cov: a tensor of covariances, where the last two axes are the dimensions.
Returns:
fn_mean: the transformed means.
fn_cov: the transformed covariances.
"""
if (len(mean.shape) + 1) != len(cov.shape):
raise ValueError('cov must be non-diagonal')
fn_mean, lin_fn = jax.linearize(fn, mean)
fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov))
return fn_mean, fn_cov
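# Illustrative sketch (hypothetical values, not from the original file): pushing
# a single 3D Gaussian through contract() by linearization, as described above.
def _example_track_linearize():
  mean = jnp.array([2.0, 0.0, 0.0])
  cov = 0.01 * jnp.eye(3)
  return track_linearize(contract, mean, cov)  # Transformed mean and covariance.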
def track_isotropic(fn, mean, scale):
"""Apply function `fn` to a set of means and scales, ala a Kalman filter.
This is the isotropic or scalar equivalent of track_linearize, as we're still
linearizing a function and tracking a Gaussian through it, but the input and
output Gaussians are all isotropic and are only represented with a single
`scale` value (where `scale**2` is the variance of the Gaussian).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
scale: a tensor of scales, with the same shape as means[..., -1].
Returns:
fn_mean: the transformed means.
fn_scale: the transformed scales.
"""
if mean.shape[:-1] != scale.shape:
raise ValueError(
f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
)
d = mean.shape[-1]
fn_mean, lin_fn = jax.linearize(fn, mean)
if scale is not None:
# Compute the Jacobian of fn function at the locations of each mean.
jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
)
# The cube root of the determinant of the Jacobian is the geometric mean
# of the eigenvalues of the Jacobian, which gives us the isotropic scaling
# implied by `fn` at each mean that `scale` should be multiplied by.
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0.
abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
# Special case d == 3 for speed's sake.
fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
else:
fn_scale = None
return fn_mean, fn_scale
def contract3_isoscale(x):
"""A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
if x.shape[-1] != 3:
raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1))
# Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq:
return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq))
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
"""Construct a bijection between metric distances and normalized distances.
See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
detailed explanation.
Args:
fn: the function to ray distances.
t_near: a tensor of near-plane distances.
t_far: a tensor of far-plane distances.
fn_inv: Optional, if not None then it's used as the inverse of fn().
Returns:
t_to_s: a function that maps distances to normalized distances in [0, 1].
s_to_t: the inverse of t_to_s.
"""
if fn is None:
fn_fwd = lambda x: x
fn_inv = lambda x: x
else:
fn_fwd = fn
if fn_inv is None:
# A simple mapping from some functions to their inverse.
inv_mapping = {
'reciprocal': jnp.reciprocal,
'log': jnp.exp,
'exp': jnp.log,
'sqrt': jnp.square,
'square': jnp.sqrt,
}
fn_inv = inv_mapping[fn.__name__]
fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)]
# Forcibly clip t to the range of valid values, to guard against inf's.
t_clip = lambda t: jnp.clip(t, t_near, t_far)
t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near)
s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near))
return t_to_s, s_to_t
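# Illustrative sketch (arbitrary near/far planes, not from the original source):
# a reciprocal warp maps metric distance t in [1, 100] to normalized s in [0, 1].
def _example_ray_warp():
  t_to_s, s_to_t = construct_ray_warps(jnp.reciprocal, 1.0, 100.0)
  s = t_to_s(10.0)   # ~0.909 for the reciprocal warp.
  t = s_to_t(s)      # Recovers ~10.0.
  return s, t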
def expected_sin(mean, var):
<fim_suffix>
return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value.
def integrated_pos_enc(mean, var, min_deg, max_deg):
"""Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).
Args:
mean: tensor, the mean coordinates to be encoded
var: tensor, the variance of the coordinates to be encoded.
min_deg: int, the min degree of the encoding.
max_deg: int, the max degree of the encoding.
Returns:
encoded: jnp.ndarray, encoded variables.
"""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = mean.shape[:-1] + (-1,)
scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape)
scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape)
return expected_sin(
jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1),
jnp.concatenate([scaled_var] * 2, axis=-1),
)
def lift_and_diagonalize(mean, cov, basis):
"""Project `mean` and `cov` onto basis and diagonalize the projected cov."""
fn_mean = math.matmul(mean, basis)
fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
return fn_mean, fn_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
"""The positional encoding used by the original NeRF paper."""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = x.shape[:-1] + (-1,)
scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c).
scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c).
# Note that we're not using safe_sin, unlike IPE.
# (..., s*c + s*c).
four_feat = jnp.sin(
jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1)
)
if append_identity:
return jnp.concatenate([x, four_feat], axis=-1)
else:
return four_feat
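# Illustrative sketch (arbitrary input, not from the original file): the classic
# NeRF positional encoding of a single 3D point with degrees [0, 4).
def _example_pos_enc():
  x = jnp.array([[0.1, -0.2, 0.3]])
  return pos_enc(x, min_deg=0, max_deg=4)  # Shape (1, 27): 3 + 2 * 3 * 4 features.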
def sqrtm(mat, return_eigs=False):
"""Take the matrix square root of a PSD matrix [..., d, d]."""
eigvec, eigval = jax.lax.linalg.eigh(
mat, symmetrize_input=False, sort_eigenvalues=False
)
scaling = math.safe_sqrt(eigval)[Ellipsis, None, :]
sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1))
return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat
def isotropize(cov, mode='accurate'):
"""Turn covariances into isotropic covariances with the same determinant."""
d = cov.shape[-1]
if d == 1:
return cov
if mode == 'fast':
det = jnp.linalg.det(cov)
diag_val = det ** (1 / d)
is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
elif mode == 'accurate':
log_det = jnp.linalg.slogdet(cov)[1]
diag_val = jnp.exp(log_det / d)
is_invalid = ~jnp.isfinite(log_det)
else:
raise ValueError(f'mode={mode} not implemented.')
cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None]
# Guard against NaN outputs when `det` is super small. Note that this does not
# guard against NaN gradients!
cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso)
return cov_iso
def construct_perp_basis(directions):
"""Construct a perpendicular basis for each 3-vector in `directions`."""
if directions.shape[-1] != 3:
raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
# To generate a vector perpendicular to `directions`, we take a cross-product
# with an arbitrary vector [0, 0, 1].
cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
# In the rare case that `directions` is very close to [0, 0, 1], we compute an
# alternate cross-product with [1, 1, 1] to use instead.
cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1)
cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a)
# Crossing `directions` with `cross1` gives us our 3rd vector.
cross2 = jnp.cross(directions, cross1)
# Normalize vectors before returning them.
normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True))
return normalize(cross1), normalize(cross2)
def hexify(rng, *, origins, directions, radii, tdist):
"""Produce hexagon-shaped samples from ray segments."""
# Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
# This is one of two orderings of angles that doesn't induce any anisotropy
# into the sample covariance of the multisample coordinates. Any rotation and
# mirroring along the z-axis of this ordering is also valid.
# There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
  # This seems to work less well, though, likely because of the strong correlation
# between adjacent angles.
thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
# Lift the angles to the size of the rays.
sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
thetas = jnp.broadcast_to(thetas, sz)
if rng is not None:
# Randomly reverse the order of half of the hexes.
key, rng = random.split(rng)
flip = random.bernoulli(key, shape=sz[:-1])
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
# Rotate each hex by some random amount.
key, rng = random.split(rng)
thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
else:
# If we're deterministic, flip and shift every other hex by 30 degrees.
flip = jnp.arange(thetas.shape[-2]) % 2
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
thetas += (flip * jnp.pi / 6)[Ellipsis, None]
# TODO(barron): Plumb through the dx/dy frame for the original ray in the
# image plane, to avoid the need of this.
perp_axis1, perp_axis2 = construct_perp_basis(directions)
# Grab each t-interval's midpoint and half-width.
t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
s = (t0 + t1) / 2
d = (t1 - t0) / 2
# Compute the length along the ray for each multisample, using mip-NeRF math.
cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
(t1**2 + 2 * s**2)[Ellipsis, None]
+ (3 / np.sqrt(7))
* (np.arange(6) * (2 / 5) - 1)
* math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
)
# Compute the offset from the ray for each multisample.
perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
# Go from ray coordinate to world coordinates.
cx = perp_mag * jnp.cos(thetas)
cy = perp_mag * jnp.sin(thetas)
control = (
origins[Ellipsis, None, None, :]
+ perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
+ perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
+ directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
)
return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
"""Construct "sigma points" along `axis` from each mean and covariance."""
d = cov.shape[-1]
mean_ex = jnp.expand_dims(mean, axis)
if basis == 'mean':
# This effectively disables the unscented transform.
return mean_ex
if basis.startswith('random_'):
num_random = int(basis.split('_')[-1])
# TODO(barron): use a non-fixed random seed?
noise = random.multivariate_normal(
random.PRNGKey(0),
jnp.zeros_like(mean),
cov,
(num_random,) + mean.shape[:-1],
)
control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
return control
sqrtm_cov = sqrtm(cov)
if any([
basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
]):
# Use tessellated regular polyhedra vertices (and vec(0)) as control points.
if d != 3:
raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
base_shape, angular_tesselation = basis.split('_')
transform = geopoly.generate_basis(
base_shape, int(angular_tesselation), remove_symmetries=False
).T
transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
elif basis == 'julier':
# The most basic symmetric unscented transformation from the original paper,
# which yields 2*d+1 control points.
offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
control = jnp.concatenate(
[mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
)
elif basis == 'menegaz':
# A compact unscented transformation from
# folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
# which yields d+1 control points.
if d == 3:
# A hand-optimized version of the d==3 case.
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
offsets = jnp.concatenate(
[-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
)
control = mean_ex + jnp.moveaxis(offsets, -1, axis)
else:
transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
# == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
else:
raise ValueError(f'basis={basis} not implemented.')
return control
def compute_control_points(
means,
covs,
rays,
tdist,
rng,
unscented_mip_basis,
unscented_scale_mult,
):
"""Wrapper to compute unscented control points for the MLP class."""
if unscented_mip_basis == 'hexify':
control, perp_mag = hexify(
rng,
origins=rays.origins,
directions=rays.directions,
radii=rays.radii,
tdist=tdist,
)
else:
# Use a normal unscented transformation.
control = unscented_transform(
means,
covs,
basis=unscented_mip_basis,
axis=-2,
)
if unscented_scale_mult > 0:
if rays is None:
raise SyntaxError(
'Rays are required as input if unscented_scale_mult > 0.'
)
# Mimic the math used by hexify to produce comparable scales.
t_recon = jnp.sum(
(control - rays.origins[Ellipsis, None, None, :])
* rays.directions[Ellipsis, None, None, :],
axis=-1,
)
perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
else:
perp_mag = None
return control, perp_mag
<fim_middle>"""Compute the mean of sin(x), x ~ N(mean, var).""" | """Compute the mean of sin(x), x ~ N(mean, var).""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/geometry.py
def spherical_to_cartesian(
r,
theta,
phi,
):
"""Converts spherical to cartesian coordinates.
For more details see cartesian_to_spherical below.
Args:
r: (..., 1) Radius of spherical coordinate.
theta: (..., 1) Elevation of spherical coordinate.
phi: (..., 1) Azimuth of spherical coordinate.
Returns:
Cartesian coordinates of shape (..., 3) defined by x, y, z.
"""
x = r * jnp.sin(theta) * jnp.cos(phi)
y = r * jnp.sin(theta) * jnp.sin(phi)
z = r * jnp.cos(theta)
return jnp.stack([x, y, z], axis=-1)
# camp_zipnerf/internal/geopoly.py
def compute_sq_dist(mat0, mat1=None):
"""Compute the squared Euclidean distance between all pairs of columns."""
if mat1 is None:
mat1 = mat0
# Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y.
sq_norm0 = np.sum(mat0**2, 0)
sq_norm1 = np.sum(mat1**2, 0)
sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1
sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors.
return sq_dist
# camp_zipnerf/internal/linspline.py
def clamp(t, y, minval, maxval):
"""Clamp (t, y) to be zero outside of t in [minval, maxval]."""
utils.assert_valid_linspline(t, y)
check_zero_endpoints(y)
# Add in extra points at and immediately above/below the min/max vals.
ti = jnp.concatenate(
[
math.minus_eps(minval),
minval,
maxval,
math.plus_eps(maxval),
],
axis=-1,
)
tc, yo = insert_knot(ti, t, y)
# Zero the spline values outside of [minval, maxval].
yc = jnp.where(tc > maxval, 0, jnp.where(tc < minval, 0, yo))
return tc, yc
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
<fim_suffix>
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
@jax.custom_jvp
def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range))
@safe_fn.defjvp
def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
lr: the learning rate for the current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1))
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
alpha=0: Cauchy/Lorentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
# A "safe" versions of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
# The loss when not in one of the special cases.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
<fim_middle>"""Clamps `x` from below to be positive.""" | """Clamps `x` from below to be positive.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/rigid_body.py
def exp_so3(
axis_angle, eps=jnp.finfo(jnp.float32).eps
):
"""Exponential map from Lie algebra so3 to Lie group SO3.
Modern Robotics Eqn 3.51, a.k.a. Rodrigues' formula.
Args:
axis_angle: A 3-vector where the direction is the axis of rotation and the
magnitude is the angle of rotation.
eps: an epsilon value for numerical stability.
Returns:
R: (3, 3) An orthonormal rotation matrix representing the same rotation.
"""
theta_squared = jnp.sum(axis_angle**2, axis=-1)
theta = _safe_sqrt(theta_squared)
# Near zero, we switch to using the first order Taylor expansion.
R_taylor = jnp.eye(3) + skew(axis_angle)
# Prevent bad gradients from propagating back when theta is small.
axis_angle_safe = jnp.where(theta_squared > eps**2, axis_angle, 0.0)
theta_safe = jnp.where(theta_squared > eps**2, theta, 1.0)
axis = axis_angle_safe / theta_safe
W = skew(axis)
R = (
jnp.eye(3)
+ jnp.sin(theta_safe) * W
+ (1.0 - jnp.cos(theta_safe)) * spin_math.matmul(W, W)
)
return jnp.where(theta_squared > eps**2, R, R_taylor)
# camp_zipnerf/internal/vis.py
def visualize_cmap(
value,
weight,
colormap,
lo=None,
hi=None,
percentile=99.0,
curve_fn=lambda x: x,
modulus=None,
matte_background=True,
):
"""Visualize a 1D image and a 1D weighting according to some colormap.
Args:
value: A 1D image.
weight: A weight map, in [0, 1].
colormap: A colormap function.
lo: The lower bound to use when rendering, if None then use a percentile.
hi: The upper bound to use when rendering, if None then use a percentile.
percentile: What percentile of the value map to crop to when automatically
generating `lo` and `hi`. Depends on `weight` as well as `value'.
curve_fn: A curve function that gets applied to `value`, `lo`, and `hi`
before the rest of visualization. Good choices: x, 1/(x+eps), log(x+eps).
modulus: If not None, mod the normalized value by `modulus`. Use (0, 1]. If
`modulus` is not None, `lo`, `hi` and `percentile` will have no effect.
matte_background: If True, matte the image over a checkerboard.
Returns:
A colormap rendering.
"""
# Identify the values that bound the middle of `value' according to `weight`.
lo_auto, hi_auto = weighted_percentile(
value, weight, [50 - percentile / 2, 50 + percentile / 2]
)
# If `lo` or `hi` are None, use the automatically-computed bounds above.
eps = jnp.finfo(jnp.float32).eps
lo = lo or (lo_auto - eps)
hi = hi or (hi_auto + eps)
# Curve all values.
value, lo, hi = [curve_fn(x) for x in [value, lo, hi]]
# Wrap the values around if requested.
if modulus:
value = jnp.mod(value, modulus) / modulus
else:
# Otherwise, just scale to [0, 1].
value = jnp.clip((value - jnp.minimum(lo, hi)) / jnp.abs(hi - lo), 0, 1)
value = jnp.nan_to_num(value)
if colormap:
colorized = colormap(value)[Ellipsis, :3]
else:
if value.shape[-1] != 3:
raise ValueError(f'value must have 3 channels but has {value.shape[-1]}')
colorized = value
return matte(colorized, weight) if matte_background else colorized
# camp_zipnerf/internal/linspline.py
def blur_stepfun(ts, ys, halfwidth):
"""Convolve a step function (ts, ys) with a box filter of size `halfwidth`."""
utils.assert_valid_stepfun(ts, ys)
# Blur each entire step function by a single `halfwidth` value.
# Dilate the t-values by at least numerical epsilon in each direction.
ts_lo = ts - halfwidth
ts_hi = jnp.maximum(math.plus_eps(ts), ts + halfwidth)
# The difference in adjacent `y` values (zero padded) divided by the
# difference in adjacent `t` values.
ys0 = jnp.concatenate(
[jnp.zeros_like(ys[Ellipsis, :1]), ys, jnp.zeros_like(ys[Ellipsis, :1])], axis=-1
)
dy = jnp.diff(ys0) / (ts_hi - ts_lo)
# When decreasing t splat a positive second derivative, and when increasing
# t splat a negative second derivative.
tp = jnp.concatenate([ts_lo, ts_hi], axis=-1)
dyp = jnp.concatenate([dy, -dy], axis=-1)
# Sort the dilated t-values and their accompanying derivative weights.
idx = jnp.argsort(tp, axis=-1)
tp = jnp.take_along_axis(tp, idx, axis=-1)
dyp = jnp.take_along_axis(dyp, idx[Ellipsis, :-2], axis=-1)
# A ramp is the double integral of a delta function, so if we double-
# integrate these derivatives we get the sum of a bunch of trapezoids.
yp = jnp.cumsum(jnp.diff(tp)[Ellipsis, :-1] * jnp.cumsum(dyp, axis=-1), axis=-1)
# Add in the missing first and last endpoint values, which must be zero
# because we assume zero padding on `ys`.
yp = jnp.concatenate(
[jnp.zeros_like(yp[Ellipsis, :1]), yp, jnp.zeros_like(yp[Ellipsis, -1:])], axis=-1
)
return tp, yp
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating step functions (piecewise-constant 1D functions).
We have a shared naming and dimension convention for these functions.
All input/output step functions are assumed to be aligned along the last axis.
`t` always indicates the x coordinates of the *endpoints* of a step function.
`y` indicates unconstrained values for the *bins* of a step function
`w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin
values that *integrate* to <= 1.
"""
from internal import linspline
from internal import math
from internal import utils
import jax
import jax.numpy as jnp
import numpy as np
def query(tq, t, y, left=None, right=None):
"""Query step function (t, y) at locations tq. Edges repeat by default."""
utils.assert_valid_stepfun(t, y)
# Query the step function to recover the interval value.
(i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu())
# Apply boundary conditions.
left = y[Ellipsis, :1] if left is None else left
right = y[Ellipsis, -1:] if right is None else right
yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq)
return yq
def weight_to_pdf(t, w):
"""Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
utils.assert_valid_stepfun(t, w)
td = jnp.diff(t)
return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td))
def pdf_to_weight(t, p):
"""Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
utils.assert_valid_stepfun(t, p)
return p * jnp.diff(t)
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
def invert_cdf(u, t, w_logits):
"""Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
utils.assert_valid_stepfun(t, w_logits)
# Compute the PDF and CDF for each weight vector.
w = jax.nn.softmax(w_logits, axis=-1)
cw = integrate_weights(w)
# Interpolate into the inverse CDF.
t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu())
return t_new
def sample(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
deterministic_center=False,
eps=jnp.finfo(jnp.float32).eps,
):
"""Piecewise-Constant PDF sampling from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of samples.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
deterministic_center: bool, if False, when `rng` is None return samples that
linspace the entire PDF. If True, skip the front and back of the linspace
so that the centers of each PDF interval are returned.
eps: float, something like numerical epsilon.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
# Draw uniform samples.
if rng is None:
# Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
<fim_suffix>
u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
else:
# `u` is in [0, 1) --- it can be zero, but it can never be 1.
u_max = eps + (1 - eps) / num_samples
max_jitter = (1 - u_max) / (num_samples - 1) - eps
d = 1 if single_jitter else num_samples
u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
rng, t.shape[:-1] + (d,), maxval=max_jitter
)
return invert_cdf(u, t, w_logits)
def sample_intervals(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
domain=(-jnp.inf, jnp.inf),
):
"""Sample *intervals* (rather than points) from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of intervals to sample.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
domain: (minval, maxval), the range of valid values for `t`.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
if num_samples <= 1:
raise ValueError(f'num_samples must be > 1, is {num_samples}.')
# Sample a set of points from the step function.
centers = sample(
rng, t, w_logits, num_samples, single_jitter, deterministic_center=True
)
# The intervals we return will span the midpoints of each adjacent sample.
mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2
# Each first/last fencepost is the reflection of the first/last midpoint
# around the first/last sampled center.
first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1]
last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:]
samples = jnp.concatenate([first, mid, last], axis=-1)
# We clamp to the limits of the input domain, provided by the caller.
samples = jnp.clip(samples, *domain)
return samples
def lossfun_distortion(t, w):
"""Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
utils.assert_valid_stepfun(t, w)
# The loss incurred between all pairs of intervals.
ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :])
loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1)
# The loss incurred within each individual interval with itself.
loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3
return loss_inter + loss_intra
def weighted_percentile(t, w, ps):
"""Compute the weighted percentiles of a step function. w's must sum to 1."""
utils.assert_valid_stepfun(t, w)
cw = integrate_weights(w)
# We want to interpolate into the integrated weights according to `ps`.
wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
jnp.array(ps) / 100, cw, t
)
return wprctile
def resample(t, tp, vp, use_avg=False):
"""Resample a step function defined by (tp, vp) into intervals t.
Notation roughly matches jnp.interp. Resamples by summation by default.
Args:
t: tensor with shape (..., n+1), the endpoints to resample into.
tp: tensor with shape (..., m+1), the endpoints of the step function being
resampled.
vp: tensor with shape (..., m), the values of the step function being
resampled.
use_avg: bool, if False, return the sum of the step function for each
interval in `t`. If True, return the average, weighted by the width of
each interval in `t`.
Returns:
v: tensor with shape (..., n), the values of the resampled step function.
"""
utils.assert_valid_stepfun(tp, vp)
if use_avg:
wp = jnp.diff(tp)
v_numer = resample(t, tp, vp * wp, use_avg=False)
v_denom = resample(t, tp, wp, use_avg=False)
v = math.safe_div(v_numer, v_denom)
return v
acc = jnp.cumsum(vp, axis=-1)
acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1)
acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
t, tp, acc0
)
v = jnp.diff(acc0_resampled, axis=-1)
return v
def blur_and_resample_weights(tq, t, w, blur_halfwidth):
"""Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
utils.assert_valid_stepfun(t, w)
# Convert the histogram to a PDF.
p = weight_to_pdf(t, w)
# Blur the PDF step function into a piecewise linear spline PDF.
t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth)
# Integrate the spline PDF, then query it to get integrated weights.
quad = linspline.compute_integral(t_linspline, p_linspline)
acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad)
# Undo the integration to get weights.
wq = jnp.diff(acc_wq, axis=-1)
# Fix negative values to 0, as they should never happen but may occur due to
# numerical issues.
wq = jnp.maximum(0, wq)
return wq
<fim_middle>if deterministic_center:
pad = 1 / (2 * num_samples)
u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
else:
u = jnp.linspace(0, 1.0 - eps, num_samples) | if deterministic_center:
pad = 1 / (2 * num_samples)
u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
else:
u = jnp.linspace(0, 1.0 - eps, num_samples) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
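A toy sketch of the sample() function above with rng=None and deterministic_center=True (the branch filled in by the target), assuming `internal.stepfun` is importable; the histogram values are made up:
import jax.numpy as jnp
from internal import stepfun  # assumed import path
t = jnp.array([0.0, 1.0, 2.0, 4.0])              # endpoints of 3 bins
w_logits = jnp.log(jnp.array([0.2, 0.5, 0.3]))   # softmax recovers these bin weights
pts = stepfun.sample(None, t, w_logits, num_samples=4, deterministic_center=True)
print(pts)  # the ~12.5%, 37.5%, 62.5%, 87.5% quantiles of the piecewise-constant PDF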
<filename>camp_zipnerf/internal/coord.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/stepfun.py
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
# camp_zipnerf/internal/linspline.py
def compute_integral(t, y):
"""Integrate a linear spline into a piecewise quadratic spline."""
utils.assert_valid_linspline(t, y)
eps = jnp.finfo(jnp.float32).eps ** 2
dt = jnp.diff(t)
a = jnp.diff(y) / jnp.maximum(eps, 2 * dt)
b = y[Ellipsis, :-1]
# The integral has an ambiguous global offset here, which we set to 0.
c1 = 0.5 * jnp.cumsum(dt[Ellipsis, :-1] * (y[Ellipsis, :-2] + y[Ellipsis, 1:-1]), axis=-1)
c = jnp.concatenate([jnp.zeros_like(y[Ellipsis, :1]), c1], axis=-1)
# This quadratic is parameterized as:
# (t - t[i])**2 * a[i] + (t - t[i]) * b[i] + c[i]
return a, b, c
# camp_zipnerf/internal/camera_delta.py
def _v_ravel_pytree(pytree):
"""Ravels a batched pytree for each batch separately.
Unfortunately `ravel_pytree` cannot be directly used with `jax.vmap` because
it returns a function (`unflatten_fn`). We therefore apply vmap to just the
first return value, which is the flattened params, and fetch the unflatten
function separately.
Example:
flat_params, unflatten_fn = _v_ravel_pytree(pytree)
pytree = jax.vmap(unflatten_fn)(flat_params)
Args:
pytree: The pytree to flatten.
Returns:
A tuple containing the flattened pytree, with each batch item flattened
separately, and the unbatched unflatten function. The unflatten function
must be vmapped.
"""
flat_params = jax.vmap(lambda p: jax.flatten_util.ravel_pytree(p)[0])(pytree)
_, unflatten_fn = jax.flatten_util.ravel_pytree(
jax.tree_util.tree_map(lambda x: x[0], pytree)
)
return flat_params, unflatten_fn
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
"""Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
# Clamping to 1 produces correct scale inside |x| < 1
x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq
z = scale * x
return z
def inv_contract(z):
"""The inverse of contract()."""
# Clamping to 1 produces correct scale inside |z| < 1
z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq
x = z / inv_scale
return x
def track_linearize(fn, mean, cov):
"""Apply function `fn` to a set of means and covariances, ala a Kalman filter.
We can analytically transform a Gaussian parameterized by `mean` and `cov`
with a function `fn` by linearizing `fn` around `mean`, and taking advantage
of the fact that Covar[Ax + y] = A(Covar[x])A^T (see
https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
cov: a tensor of covariances, where the last two axes are the dimensions.
Returns:
fn_mean: the transformed means.
fn_cov: the transformed covariances.
"""
if (len(mean.shape) + 1) != len(cov.shape):
raise ValueError('cov must be non-diagonal')
fn_mean, lin_fn = jax.linearize(fn, mean)
fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov))
return fn_mean, fn_cov
def track_isotropic(fn, mean, scale):
"""Apply function `fn` to a set of means and scales, ala a Kalman filter.
This is the isotropic or scalar equivalent of track_linearize, as we're still
linearizing a function and tracking a Gaussian through it, but the input and
output Gaussians are all isotropic and are only represented with a single
`scale` value (where `scale**2` is the variance of the Gaussian).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
scale: a tensor of scales, with the same shape as means[..., -1].
Returns:
fn_mean: the transformed means.
fn_scale: the transformed scales.
"""
if mean.shape[:-1] != scale.shape:
raise ValueError(
f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
)
d = mean.shape[-1]
fn_mean, lin_fn = jax.linearize(fn, mean)
<fim_suffix>
return fn_mean, fn_scale
def contract3_isoscale(x):
"""A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
if x.shape[-1] != 3:
raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1))
# Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq:
return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq))
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
"""Construct a bijection between metric distances and normalized distances.
See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
detailed explanation.
Args:
fn: the function to apply to ray distances.
t_near: a tensor of near-plane distances.
t_far: a tensor of far-plane distances.
fn_inv: Optional, if not None then it's used as the inverse of fn().
Returns:
t_to_s: a function that maps distances to normalized distances in [0, 1].
s_to_t: the inverse of t_to_s.
"""
if fn is None:
fn_fwd = lambda x: x
fn_inv = lambda x: x
else:
fn_fwd = fn
if fn_inv is None:
# A simple mapping from some functions to their inverse.
inv_mapping = {
'reciprocal': jnp.reciprocal,
'log': jnp.exp,
'exp': jnp.log,
'sqrt': jnp.square,
'square': jnp.sqrt,
}
fn_inv = inv_mapping[fn.__name__]
fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)]
# Forcibly clip t to the range of valid values, to guard against inf's.
t_clip = lambda t: jnp.clip(t, t_near, t_far)
t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near)
s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near))
return t_to_s, s_to_t
def expected_sin(mean, var):
"""Compute the mean of sin(x), x ~ N(mean, var)."""
return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value.
def integrated_pos_enc(mean, var, min_deg, max_deg):
"""Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).
Args:
mean: tensor, the mean coordinates to be encoded
var: tensor, the variance of the coordinates to be encoded.
min_deg: int, the min degree of the encoding.
max_deg: int, the max degree of the encoding.
Returns:
encoded: jnp.ndarray, encoded variables.
"""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = mean.shape[:-1] + (-1,)
scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape)
scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape)
return expected_sin(
jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1),
jnp.concatenate([scaled_var] * 2, axis=-1),
)
def lift_and_diagonalize(mean, cov, basis):
"""Project `mean` and `cov` onto basis and diagonalize the projected cov."""
fn_mean = math.matmul(mean, basis)
fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
return fn_mean, fn_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
"""The positional encoding used by the original NeRF paper."""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = x.shape[:-1] + (-1,)
scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c).
scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c).
# Note that we're not using safe_sin, unlike IPE.
# (..., s*c + s*c).
four_feat = jnp.sin(
jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1)
)
if append_identity:
return jnp.concatenate([x, four_feat], axis=-1)
else:
return four_feat
def sqrtm(mat, return_eigs=False):
"""Take the matrix square root of a PSD matrix [..., d, d]."""
eigvec, eigval = jax.lax.linalg.eigh(
mat, symmetrize_input=False, sort_eigenvalues=False
)
scaling = math.safe_sqrt(eigval)[Ellipsis, None, :]
sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1))
return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat
def isotropize(cov, mode='accurate'):
"""Turn covariances into isotropic covariances with the same determinant."""
d = cov.shape[-1]
if d == 1:
return cov
if mode == 'fast':
det = jnp.linalg.det(cov)
diag_val = det ** (1 / d)
is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
elif mode == 'accurate':
log_det = jnp.linalg.slogdet(cov)[1]
diag_val = jnp.exp(log_det / d)
is_invalid = ~jnp.isfinite(log_det)
else:
raise ValueError(f'mode={mode} not implemented.')
cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None]
# Guard against NaN outputs when `det` is super small. Note that this does not
# guard against NaN gradients!
cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso)
return cov_iso
def construct_perp_basis(directions):
"""Construct a perpendicular basis for each 3-vector in `directions`."""
if directions.shape[-1] != 3:
raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
# To generate a vector perpendicular to `directions`, we take a cross-product
# with an arbitrary vector [0, 0, 1].
cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
# In the rare case that `directions` is very close to [0, 0, 1], we compute an
# alternate cross-product with [1, 1, 1] to use instead.
cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1)
cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a)
# Crossing `directions` with `cross1` gives us our 3rd vector.
cross2 = jnp.cross(directions, cross1)
# Normalize vectors before returning them.
normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True))
return normalize(cross1), normalize(cross2)
def hexify(rng, *, origins, directions, radii, tdist):
"""Produce hexagon-shaped samples from ray segments."""
# Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
# This is one of two orderings of angles that doesn't induce any anisotropy
# into the sample covariance of the multisample coordinates. Any rotation and
# mirroring along the z-axis of this ordering is also valid.
# There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
# This seems to work less well though likely because of the strong correlation
# between adjacent angles.
thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
# Lift the angles to the size of the rays.
sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
thetas = jnp.broadcast_to(thetas, sz)
if rng is not None:
# Randomly reverse the order of half of the hexes.
key, rng = random.split(rng)
flip = random.bernoulli(key, shape=sz[:-1])
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
# Rotate each hex by some random amount.
key, rng = random.split(rng)
thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
else:
# If we're deterministic, flip and shift every other hex by 30 degrees.
flip = jnp.arange(thetas.shape[-2]) % 2
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
thetas += (flip * jnp.pi / 6)[Ellipsis, None]
# TODO(barron): Plumb through the dx/dy frame for the original ray in the
# image plane, to avoid the need of this.
perp_axis1, perp_axis2 = construct_perp_basis(directions)
# Grab each t-interval's midpoint and half-width.
t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
s = (t0 + t1) / 2
d = (t1 - t0) / 2
# Compute the length along the ray for each multisample, using mip-NeRF math.
cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
(t1**2 + 2 * s**2)[Ellipsis, None]
+ (3 / np.sqrt(7))
* (np.arange(6) * (2 / 5) - 1)
* math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
)
# Compute the offset from the ray for each multisample.
perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
# Go from ray coordinate to world coordinates.
cx = perp_mag * jnp.cos(thetas)
cy = perp_mag * jnp.sin(thetas)
control = (
origins[Ellipsis, None, None, :]
+ perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
+ perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
+ directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
)
return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
"""Construct "sigma points" along `axis` from each mean and covariance."""
d = cov.shape[-1]
mean_ex = jnp.expand_dims(mean, axis)
if basis == 'mean':
# This effectively disables the unscented transform.
return mean_ex
if basis.startswith('random_'):
num_random = int(basis.split('_')[-1])
# TODO(barron): use a non-fixed random seed?
noise = random.multivariate_normal(
random.PRNGKey(0),
jnp.zeros_like(mean),
cov,
(num_random,) + mean.shape[:-1],
)
control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
return control
sqrtm_cov = sqrtm(cov)
if any([
basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
]):
# Use tessellated regular polyhedra vertices (and vec(0)) as control points.
if d != 3:
raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
base_shape, angular_tesselation = basis.split('_')
transform = geopoly.generate_basis(
base_shape, int(angular_tesselation), remove_symmetries=False
).T
transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
elif basis == 'julier':
# The most basic symmetric unscented transformation from the original paper,
# which yields 2*d+1 control points.
offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
control = jnp.concatenate(
[mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
)
elif basis == 'menegaz':
# A compact unscented transformation from
# folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
# which yields d+1 control points.
if d == 3:
# A hand-optimized version of the d==3 case.
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
offsets = jnp.concatenate(
[-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
)
control = mean_ex + jnp.moveaxis(offsets, -1, axis)
else:
transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
# == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
else:
raise ValueError(f'basis={basis} not implemented.')
return control
def compute_control_points(
means,
covs,
rays,
tdist,
rng,
unscented_mip_basis,
unscented_scale_mult,
):
"""Wrapper to compute unscented control points for the MLP class."""
if unscented_mip_basis == 'hexify':
control, perp_mag = hexify(
rng,
origins=rays.origins,
directions=rays.directions,
radii=rays.radii,
tdist=tdist,
)
else:
# Use a normal unscented transformation.
control = unscented_transform(
means,
covs,
basis=unscented_mip_basis,
axis=-2,
)
if unscented_scale_mult > 0:
if rays is None:
raise SyntaxError(
'Rays are required as input if unscented_scale_mult > 0.'
)
# Mimic the math used by hexify to produce comparable scales.
t_recon = jnp.sum(
(control - rays.origins[Ellipsis, None, None, :])
* rays.directions[Ellipsis, None, None, :],
axis=-1,
)
perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
else:
perp_mag = None
return control, perp_mag
<fim_middle>if scale is not None:
# Compute the Jacobian of fn function at the locations of each mean.
jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
)
# The cube root of the determinant of the Jacobian is the geometric mean
# of the eigenvalues of the Jacobian, which gives us the isotropic scaling
# implied by `fn` at each mean that `scale` should be multiplied by.
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0.
abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
# Special case d == 3 for speed's sake.
fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
else:
fn_scale = None | if scale is not None:
# Compute the Jacobian of fn function at the locations of each mean.
jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
)
# The cube root of the determinant of the Jacobian is the geometric mean
# of the eigenvalues of the Jacobian, which gives us the isotropic scaling
# implied by `fn` at each mean that `scale` should be multiplied by.
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0.
abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
# Special case d == 3 for speed's sake.
fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
else:
fn_scale = None | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
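A quick round-trip sketch for the contract() and inv_contract() pair defined above, assuming `internal.coord` is importable; the sample points are arbitrary:
import jax.numpy as jnp
from internal import coord  # assumed import path
x = jnp.array([[0.3, -0.2, 0.1], [10.0, 0.0, 0.0]])
z = coord.contract(x)                               # points with |x| <= 1 pass through unchanged
print(jnp.linalg.norm(z, axis=-1))                  # [10, 0, 0] maps to norm 2 - 1/10 = 1.9
print(jnp.max(jnp.abs(coord.inv_contract(z) - x)))  # ~0 up to float32 error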
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/rigid_body.py
def exp_so3(
axis_angle, eps=jnp.finfo(jnp.float32).eps
):
"""Exponential map from Lie algebra so3 to Lie group SO3.
Modern Robotics Eqn 3.51, a.k.a. Rodrigues' formula.
Args:
axis_angle: A 3-vector where the direction is the axis of rotation and the
magnitude is the angle of rotation.
eps: an epsilon value for numerical stability.
Returns:
R: (3, 3) An orthonormal rotation matrix representing the same rotation.
"""
theta_squared = jnp.sum(axis_angle**2, axis=-1)
theta = _safe_sqrt(theta_squared)
# Near zero, we switch to using the first order Taylor expansion.
R_taylor = jnp.eye(3) + skew(axis_angle)
# Prevent bad gradients from propagating back when theta is small.
axis_angle_safe = jnp.where(theta_squared > eps**2, axis_angle, 0.0)
theta_safe = jnp.where(theta_squared > eps**2, theta, 1.0)
axis = axis_angle_safe / theta_safe
W = skew(axis)
R = (
jnp.eye(3)
+ jnp.sin(theta_safe) * W
+ (1.0 - jnp.cos(theta_safe)) * spin_math.matmul(W, W)
)
return jnp.where(theta_squared > eps**2, R, R_taylor)
# camp_zipnerf/internal/vis.py
def visualize_cmap(
value,
weight,
colormap,
lo=None,
hi=None,
percentile=99.0,
curve_fn=lambda x: x,
modulus=None,
matte_background=True,
):
"""Visualize a 1D image and a 1D weighting according to some colormap.
Args:
value: A 1D image.
weight: A weight map, in [0, 1].
colormap: A colormap function.
lo: The lower bound to use when rendering, if None then use a percentile.
hi: The upper bound to use when rendering, if None then use a percentile.
percentile: What percentile of the value map to crop to when automatically
generating `lo` and `hi`. Depends on `weight` as well as `value'.
curve_fn: A curve function that gets applied to `value`, `lo`, and `hi`
before the rest of visualization. Good choices: x, 1/(x+eps), log(x+eps).
modulus: If not None, mod the normalized value by `modulus`. Use (0, 1]. If
`modulus` is not None, `lo`, `hi` and `percentile` will have no effect.
matte_background: If True, matte the image over a checkerboard.
Returns:
A colormap rendering.
"""
# Identify the values that bound the middle of `value' according to `weight`.
lo_auto, hi_auto = weighted_percentile(
value, weight, [50 - percentile / 2, 50 + percentile / 2]
)
# If `lo` or `hi` are None, use the automatically-computed bounds above.
eps = jnp.finfo(jnp.float32).eps
lo = lo or (lo_auto - eps)
hi = hi or (hi_auto + eps)
# Curve all values.
value, lo, hi = [curve_fn(x) for x in [value, lo, hi]]
# Wrap the values around if requested.
if modulus:
value = jnp.mod(value, modulus) / modulus
else:
# Otherwise, just scale to [0, 1].
value = jnp.clip((value - jnp.minimum(lo, hi)) / jnp.abs(hi - lo), 0, 1)
value = jnp.nan_to_num(value)
if colormap:
colorized = colormap(value)[Ellipsis, :3]
else:
if value.shape[-1] != 3:
raise ValueError(f'value must have 3 channels but has {value.shape[-1]}')
colorized = value
return matte(colorized, weight) if matte_background else colorized
# camp_zipnerf/internal/linspline.py
def blur_stepfun(ts, ys, halfwidth):
"""Convolve a step function (ts, ys) with a box filter of size `halfwidth`."""
utils.assert_valid_stepfun(ts, ys)
# Blur each entire step function by a single `halfwidth` value.
# Dilate the t-values by at least numerical epsilon in each direction.
ts_lo = ts - halfwidth
ts_hi = jnp.maximum(math.plus_eps(ts), ts + halfwidth)
# The difference in adjacent `y` values (zero padded) divided by the
# difference in adjacent `t` values.
ys0 = jnp.concatenate(
[jnp.zeros_like(ys[Ellipsis, :1]), ys, jnp.zeros_like(ys[Ellipsis, :1])], axis=-1
)
dy = jnp.diff(ys0) / (ts_hi - ts_lo)
# When decreasing t splat a positive second derivative, and when increasing
# t splat a negative second derivative.
tp = jnp.concatenate([ts_lo, ts_hi], axis=-1)
dyp = jnp.concatenate([dy, -dy], axis=-1)
# Sort the dilated t-values and their accompanying derivative weights.
idx = jnp.argsort(tp, axis=-1)
tp = jnp.take_along_axis(tp, idx, axis=-1)
dyp = jnp.take_along_axis(dyp, idx[Ellipsis, :-2], axis=-1)
# A ramp is the double integral of a delta function, so if we double-
# integrate these derivatives we get the sum of a bunch of trapezoids.
yp = jnp.cumsum(jnp.diff(tp)[Ellipsis, :-1] * jnp.cumsum(dyp, axis=-1), axis=-1)
# Add in the missing first and last endpoint values, which must be zero
# because we assume zero padding on `ys`.
yp = jnp.concatenate(
[jnp.zeros_like(yp[Ellipsis, :1]), yp, jnp.zeros_like(yp[Ellipsis, -1:])], axis=-1
)
return tp, yp
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating step functions (piecewise-constant 1D functions).
We have a shared naming and dimension convention for these functions.
All input/output step functions are assumed to be aligned along the last axis.
`t` always indicates the x coordinates of the *endpoints* of a step function.
`y` indicates unconstrained values for the *bins* of a step function
`w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin
values that *integrate* to <= 1.
"""
from internal import linspline
from internal import math
from internal import utils
import jax
import jax.numpy as jnp
import numpy as np
def query(tq, t, y, left=None, right=None):
"""Query step function (t, y) at locations tq. Edges repeat by default."""
utils.assert_valid_stepfun(t, y)
# Query the step function to recover the interval value.
(i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu())
# Apply boundary conditions.
left = y[Ellipsis, :1] if left is None else left
right = y[Ellipsis, -1:] if right is None else right
yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq)
return yq
def weight_to_pdf(t, w):
"""Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
utils.assert_valid_stepfun(t, w)
td = jnp.diff(t)
return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td))
def pdf_to_weight(t, p):
"""Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
utils.assert_valid_stepfun(t, p)
return p * jnp.diff(t)
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
def invert_cdf(u, t, w_logits):
"""Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
utils.assert_valid_stepfun(t, w_logits)
# Compute the PDF and CDF for each weight vector.
w = jax.nn.softmax(w_logits, axis=-1)
cw = integrate_weights(w)
# Interpolate into the inverse CDF.
t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu())
return t_new
def sample(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
deterministic_center=False,
eps=jnp.finfo(jnp.float32).eps,
):
"""Piecewise-Constant PDF sampling from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of samples.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
deterministic_center: bool, if False, when `rng` is None return samples that
linspace the entire PDF. If True, skip the front and back of the linspace
so that the centers of each PDF interval are returned.
eps: float, something like numerical epsilon.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
# Draw uniform samples.
<fim_suffix>
return invert_cdf(u, t, w_logits)
def sample_intervals(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
domain=(-jnp.inf, jnp.inf),
):
"""Sample *intervals* (rather than points) from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of intervals to sample.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
domain: (minval, maxval), the range of valid values for `t`.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
if num_samples <= 1:
raise ValueError(f'num_samples must be > 1, is {num_samples}.')
# Sample a set of points from the step function.
centers = sample(
rng, t, w_logits, num_samples, single_jitter, deterministic_center=True
)
# The intervals we return will span the midpoints of each adjacent sample.
mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2
# Each first/last fencepost is the reflection of the first/last midpoint
# around the first/last sampled center.
first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1]
last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:]
samples = jnp.concatenate([first, mid, last], axis=-1)
# We clamp to the limits of the input domain, provided by the caller.
samples = jnp.clip(samples, *domain)
return samples
def lossfun_distortion(t, w):
"""Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
utils.assert_valid_stepfun(t, w)
# The loss incurred between all pairs of intervals.
ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :])
loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1)
# The loss incurred within each individual interval with itself.
loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3
return loss_inter + loss_intra
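# Illustrative usage sketch (not part of the original module): hypothetical
# histograms showing that the distortion loss is smaller when the mass is
# concentrated in one interval than when it is spread out.
def _demo_distortion_prefers_compact_weights():
  t = jnp.array([0.0, 1.0, 2.0, 3.0])
  concentrated = jnp.array([0.0, 1.0, 0.0])
  spread = jnp.array([1.0, 1.0, 1.0]) / 3
  return (
      lossfun_distortion(t, concentrated),  # ~1/3 (only the intra-bin term).
      lossfun_distortion(t, spread),  # ~1.0
  )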
def weighted_percentile(t, w, ps):
"""Compute the weighted percentiles of a step function. w's must sum to 1."""
utils.assert_valid_stepfun(t, w)
cw = integrate_weights(w)
# We want to interpolate into the integrated weights according to `ps`.
wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
jnp.array(ps) / 100, cw, t
)
return wprctile
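# Illustrative usage sketch (not part of the original module): the weighted
# median of a hypothetical two-bin histogram.
def _demo_weighted_median():
  t = jnp.array([0.0, 1.0, 2.0])
  w = jnp.array([0.5, 0.5])
  return weighted_percentile(t, w, [50])  # -> [1.0]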
def resample(t, tp, vp, use_avg=False):
"""Resample a step function defined by (tp, vp) into intervals t.
Notation roughly matches jnp.interp. Resamples by summation by default.
Args:
t: tensor with shape (..., n+1), the endpoints to resample into.
tp: tensor with shape (..., m+1), the endpoints of the step function being
resampled.
vp: tensor with shape (..., m), the values of the step function being
resampled.
use_avg: bool, if False, return the sum of the step function for each
interval in `t`. If True, return the average, weighted by the width of
each interval in `t`.
Returns:
v: tensor with shape (..., n), the values of the resampled step function.
"""
utils.assert_valid_stepfun(tp, vp)
if use_avg:
wp = jnp.diff(tp)
v_numer = resample(t, tp, vp * wp, use_avg=False)
v_denom = resample(t, tp, wp, use_avg=False)
v = math.safe_div(v_numer, v_denom)
return v
acc = jnp.cumsum(vp, axis=-1)
acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1)
acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
t, tp, acc0
)
v = jnp.diff(acc0_resampled, axis=-1)
return v
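# Illustrative usage sketch (not part of the original module): hypothetical
# endpoints showing that resampling by summation preserves total mass.
def _demo_resample_preserves_sum():
  tp = jnp.array([0.0, 1.0, 2.0])
  vp = jnp.array([1.0, 3.0])
  t = jnp.array([0.0, 0.5, 2.0])
  return resample(t, tp, vp)  # -> [0.5, 3.5], which sums to 4 like vp.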
def blur_and_resample_weights(tq, t, w, blur_halfwidth):
"""Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
utils.assert_valid_stepfun(t, w)
# Convert the histogram to a PDF.
p = weight_to_pdf(t, w)
# Blur the PDF step function into a piecewise linear spline PDF.
t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth)
# Integrate the spline PDF, then query it to get integrated weights.
quad = linspline.compute_integral(t_linspline, p_linspline)
acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad)
# Undo the integration to get weights.
wq = jnp.diff(acc_wq, axis=-1)
# Fix negative values to 0, as they should never happen but may due to
# numerical issues.
wq = jnp.maximum(0, wq)
return wq
<fim_middle>if rng is None:
# Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
if deterministic_center:
pad = 1 / (2 * num_samples)
u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
else:
u = jnp.linspace(0, 1.0 - eps, num_samples)
u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
else:
# `u` is in [0, 1) --- it can be zero, but it can never be 1.
u_max = eps + (1 - eps) / num_samples
max_jitter = (1 - u_max) / (num_samples - 1) - eps
d = 1 if single_jitter else num_samples
u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
rng, t.shape[:-1] + (d,), maxval=max_jitter
) | if rng is None:
# Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
if deterministic_center:
pad = 1 / (2 * num_samples)
u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
else:
u = jnp.linspace(0, 1.0 - eps, num_samples)
u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
else:
# `u` is in [0, 1) --- it can be zero, but it can never be 1.
u_max = eps + (1 - eps) / num_samples
max_jitter = (1 - u_max) / (num_samples - 1) - eps
d = 1 if single_jitter else num_samples
u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
rng, t.shape[:-1] + (d,), maxval=max_jitter
) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/image_io.py
def load_exif(pth):
"""Load EXIF data for an image."""
with utils.open_file(pth, 'rb') as f:
image_pil = Image.open(f)
exif_pil = image_pil._getexif() # pylint: disable=protected-access
if exif_pil is not None:
exif = {
ExifTags.TAGS[k]: v for k, v in exif_pil.items() if k in ExifTags.TAGS
}
else:
exif = {}
return exif
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
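# Illustrative usage sketch (not part of the original module): dummy_rays()
# can initialize a model without real data; the shapes follow from the n=100
# default above.
def _demo_dummy_rays_shapes():
  rays = dummy_rays()
  return rays.origins.shape, rays.directions.shape  # ((100, 3), (100, 3))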
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
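# Illustrative usage sketch (not part of the original module): a shard/unshard
# round trip, assuming the leading batch dimension is a multiple of the local
# device count.
def _demo_shard_roundtrip():
  n_dev = jax.local_device_count()
  x = np.arange(4 * n_dev, dtype=np.float32).reshape(4 * n_dev, 1)
  xs = shard(x)  # Shape (n_dev, 4, 1).
  return unshard(xs)  # Recovers the original (4 * n_dev, 1) array.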
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
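# Illustrative usage sketch (not part of the original module): hypothetical
# arrays showing the shape conventions the two asserts encode -- a step
# function has one more endpoint than bin values, while a linear spline pairs
# every knot with a value.
def _demo_shape_conventions():
  t = np.zeros(4)
  assert_valid_stepfun(t, np.zeros(3))  # OK: 4 endpoints, 3 bins.
  assert_valid_linspline(t, np.zeros(4))  # OK: 4 knots, 4 values.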
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark populating_data as a variable that's outside of thread_fn.
# Otherwise, `populating_data = False` below would create a local variable.
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
<fim_suffix>
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
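# Illustrative usage sketch (not part of the original module): decorating a
# hypothetical generator makes its iteration run in a background thread with a
# bounded queue.
@iterate_in_separate_thread(queue_size=2)
def _demo_number_stream():
  for i in range(3):
    yield i
# list(_demo_number_stream()) -> [0, 1, 2], produced by the worker thread.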
<fim_middle>if not populating_data and results_queue.empty():
break | if not populating_data and results_queue.empty():
break | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/coord.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/spin_math.py
def safe_sqrt(x,
*,
eps = jnp.finfo(jnp.float32).eps,
value_at_zero = 0.0):
"""A safe version of jnp.sqrt that avoid evaluating at zero.
Note: sqrt(x) = sqrt(eps) = 3e-4 when x < eps = 1.19e-7.
Args:
x: The operand.
eps: A small number to prevent NaNs.
value_at_zero: The value to clamp x to near zero. The return value will be
sqrt(value_at_zero)
Returns:
The sqrt(x), or sqrt(value_at_zero) near zero.
"""
safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero))
return jnp.sqrt(safe_x)
# camp_zipnerf/internal/stepfun.py
def weight_to_pdf(t, w):
"""Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
utils.assert_valid_stepfun(t, w)
td = jnp.diff(t)
return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td))
# camp_zipnerf/internal/geometry.py
def line_distance(point1, dir1, point2,
dir2):
"""Compute the distance between two lines in 3D.
Note that this is the distance between lines and not line segments or rays;
i.e., it does not consider endpoints and will compute the distance assuming
the line extends infinitely in both directions.
Args:
point1: (3,) a point on the first line.
dir1: (3,) the direction vector of the first line.
point2: (3,) a point on the second line.
dir2: (3,) the direction vector of the second line.
Returns:
The distance between the two lines.
"""
is_parallel = are_lines_parallel(dir1, dir2)
skew_dist = skew_line_distance(point1, dir1, point2, dir2)
parallel_dist = line_to_point_distance(point1, dir1, point2)
return jnp.where(is_parallel, parallel_dist, skew_dist)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
"""Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
# Clamping to 1 produces correct scale inside |x| < 1
x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq
z = scale * x
return z
def inv_contract(z):
"""The inverse of contract()."""
# Clamping to 1 produces correct scale inside |z| < 1
z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq
x = z / inv_scale
return x
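# Illustrative usage sketch (not part of the original module): a hypothetical
# point showing that contract() maps all of space into a radius-2 ball and
# inv_contract() undoes it.
def _demo_contract_roundtrip():
  x = jnp.array([4.0, 0.0, 0.0])
  z = contract(x)  # -> [1.75, 0., 0.], i.e. norm 2 - 1/4.
  return inv_contract(z)  # Recovers [4., 0., 0.].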
def track_linearize(fn, mean, cov):
"""Apply function `fn` to a set of means and covariances, ala a Kalman filter.
We can analytically transform a Gaussian parameterized by `mean` and `cov`
with a function `fn` by linearizing `fn` around `mean`, and taking advantage
of the fact that Covar[Ax + y] = A(Covar[x])A^T (see
https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
cov: a tensor of covariances, where the last two axes are the dimensions.
Returns:
fn_mean: the transformed means.
fn_cov: the transformed covariances.
"""
if (len(mean.shape) + 1) != len(cov.shape):
raise ValueError('cov must be non-diagonal')
fn_mean, lin_fn = jax.linearize(fn, mean)
fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov))
return fn_mean, fn_cov
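# Illustrative usage sketch (not part of the original module): for an affine
# function the linearization is exact, so scaling the input by 2 scales the
# covariance by 4. The demo Gaussian is hypothetical.
def _demo_track_linearize_affine():
  mean = jnp.array([1.0, 2.0, 3.0])
  cov = jnp.eye(3)
  return track_linearize(lambda x: 2.0 * x, mean, cov)  # -> (2 * mean, 4 * eye(3))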
def track_isotropic(fn, mean, scale):
"""Apply function `fn` to a set of means and scales, ala a Kalman filter.
This is the isotropic or scalar equivalent of track_linearize, as we're still
linearizing a function and tracking a Gaussian through it, but the input and
output Gaussians are all isotropic and are only represented with a single
`scale` value (where `scale**2` is the variance of the Gaussian).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
scale: a tensor of scales, with the same shape as means[..., -1].
Returns:
fn_mean: the transformed means.
fn_scale: the transformed scales.
"""
if mean.shape[:-1] != scale.shape:
raise ValueError(
f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
)
d = mean.shape[-1]
fn_mean, lin_fn = jax.linearize(fn, mean)
if scale is not None:
# Compute the Jacobian of fn function at the locations of each mean.
jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
)
# The cube root of the determinant of the Jacobian is the geometric mean
# of the eigenvalues of the Jacobian, which gives us the isotropic scaling
# implied by `fn` at each mean that `scale` should be multiplied by.
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0.
abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
# Special case d == 3 for speed's sake.
fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
else:
fn_scale = None
return fn_mean, fn_scale
def contract3_isoscale(x):
"""A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
if x.shape[-1] != 3:
raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1))
# Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq:
return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq))
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
"""Construct a bijection between metric distances and normalized distances.
See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
detailed explanation.
Args:
fn: the function to ray distances.
t_near: a tensor of near-plane distances.
t_far: a tensor of far-plane distances.
fn_inv: Optional, if not None then it's used as the inverse of fn().
Returns:
t_to_s: a function that maps distances to normalized distances in [0, 1].
s_to_t: the inverse of t_to_s.
"""
if fn is None:
fn_fwd = lambda x: x
fn_inv = lambda x: x
else:
fn_fwd = fn
if fn_inv is None:
# A simple mapping from some functions to their inverse.
inv_mapping = {
'reciprocal': jnp.reciprocal,
'log': jnp.exp,
'exp': jnp.log,
'sqrt': jnp.square,
'square': jnp.sqrt,
}
fn_inv = inv_mapping[fn.__name__]
fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)]
# Forcibly clip t to the range of valid values, to guard against inf's.
t_clip = lambda t: jnp.clip(t, t_near, t_far)
t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near)
s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near))
return t_to_s, s_to_t
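# Illustrative usage sketch (not part of the original module): with
# fn=jnp.reciprocal the normalized coordinate s is linear in inverse depth, so
# s=0.5 lands far from the metric midpoint of the hypothetical [1, 100] range.
def _demo_reciprocal_ray_warp():
  t_to_s, s_to_t = construct_ray_warps(
      jnp.reciprocal, jnp.float32(1.0), jnp.float32(100.0)
  )
  return t_to_s(jnp.float32(1.0)), s_to_t(jnp.float32(0.5))  # -> (0.0, ~1.98)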
def expected_sin(mean, var):
"""Compute the mean of sin(x), x ~ N(mean, var)."""
return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value.
def integrated_pos_enc(mean, var, min_deg, max_deg):
"""Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).
Args:
mean: tensor, the mean coordinates to be encoded
var: tensor, the variance of the coordinates to be encoded.
min_deg: int, the min degree of the encoding.
max_deg: int, the max degree of the encoding.
Returns:
encoded: jnp.ndarray, encoded variables.
"""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = mean.shape[:-1] + (-1,)
scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape)
scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape)
return expected_sin(
jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1),
jnp.concatenate([scaled_var] * 2, axis=-1),
)
def lift_and_diagonalize(mean, cov, basis):
"""Project `mean` and `cov` onto basis and diagonalize the projected cov."""
fn_mean = math.matmul(mean, basis)
fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
return fn_mean, fn_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
"""The positional encoding used by the original NeRF paper."""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = x.shape[:-1] + (-1,)
scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c).
scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c).
# Note that we're not using safe_sin, unlike IPE.
# (..., s*c + s*c).
four_feat = jnp.sin(
jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1)
)
if append_identity:
return jnp.concatenate([x, four_feat], axis=-1)
else:
return four_feat
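# Illustrative usage sketch (not part of the original module): feature-size
# bookkeeping for the encoding above -- d input channels become
# d + 2 * d * (max_deg - min_deg) features when the identity is appended.
def _demo_pos_enc_size():
  x = jnp.zeros((5, 3))
  return pos_enc(x, min_deg=0, max_deg=4).shape  # -> (5, 27)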
def sqrtm(mat, return_eigs=False):
"""Take the matrix square root of a PSD matrix [..., d, d]."""
eigvec, eigval = jax.lax.linalg.eigh(
mat, symmetrize_input=False, sort_eigenvalues=False
)
scaling = math.safe_sqrt(eigval)[Ellipsis, None, :]
sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1))
return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat
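# Illustrative usage sketch (not part of the original module): the matrix
# square root of a hypothetical diagonal PSD matrix.
def _demo_sqrtm_diag():
  return sqrtm(jnp.diag(jnp.array([4.0, 9.0])))  # -> diag([2., 3.]), up to numerics.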
def isotropize(cov, mode='accurate'):
"""Turn covariances into isotropic covariances with the same determinant."""
d = cov.shape[-1]
if d == 1:
return cov
<fim_suffix>
cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None]
# Guard against NaN outputs when `det` is super small. Note that this does not
# guard against NaN gradients!
cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso)
return cov_iso
def construct_perp_basis(directions):
"""Construct a perpendicular basis for each 3-vector in `directions`."""
if directions.shape[-1] != 3:
raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
# To generate a vector perpendicular to `directions`, we take a cross-product
# with an arbitrary vector [0, 0, 1].
cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
# In the rare case that `directions` is very close to [0, 0, 1], we compute an
# alternate cross-product with [1, 1, 1] to use instead.
cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1)
cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a)
# Crossing `directions` with `cross1` gives us our 3rd vector.
cross2 = jnp.cross(directions, cross1)
# Normalize vectors before returning them.
normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True))
return normalize(cross1), normalize(cross2)
def hexify(rng, *, origins, directions, radii, tdist):
"""Produce hexagon-shaped samples from ray segments."""
# Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
# This is one of two orderings of angles that doesn't induce any anisotropy
# into the sample covariance of the multisample coordinates. Any rotation and
# mirroring along the z-axis of this ordering is also valid.
# There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
# This seems to work less well though likely because of the strong correlation
# between adjacent angles.
thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
# Lift the angles to the size of the rays.
sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
thetas = jnp.broadcast_to(thetas, sz)
if rng is not None:
# Randomly reverse the order of half of the hexes.
key, rng = random.split(rng)
flip = random.bernoulli(key, shape=sz[:-1])
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
# Rotate each hex by some random amount.
key, rng = random.split(rng)
thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
else:
# If we're deterministic, flip and shift every other hex by 30 degrees.
flip = jnp.arange(thetas.shape[-2]) % 2
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
thetas += (flip * jnp.pi / 6)[Ellipsis, None]
# TODO(barron): Plumb through the dx/dy frame for the original ray in the
# image plane, to avoid the need of this.
perp_axis1, perp_axis2 = construct_perp_basis(directions)
# Grab each t-interval's midpoint and half-width.
t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
s = (t0 + t1) / 2
d = (t1 - t0) / 2
# Compute the length along the ray for each multisample, using mip-NeRF math.
cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
(t1**2 + 2 * s**2)[Ellipsis, None]
+ (3 / np.sqrt(7))
* (np.arange(6) * (2 / 5) - 1)
* math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
)
# Compute the offset from the ray for each multisample.
perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
# Go from ray coordinate to world coordinates.
cx = perp_mag * jnp.cos(thetas)
cy = perp_mag * jnp.sin(thetas)
control = (
origins[Ellipsis, None, None, :]
+ perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
+ perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
+ directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
)
return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
"""Construct "sigma points" along `axis` from each mean and covariance."""
d = cov.shape[-1]
mean_ex = jnp.expand_dims(mean, axis)
if basis == 'mean':
# This effectively disables the unscented transform.
return mean_ex
if basis.startswith('random_'):
num_random = int(basis.split('_')[-1])
# TODO(barron): use a non-fixed random seed?
noise = random.multivariate_normal(
random.PRNGKey(0),
jnp.zeros_like(mean),
cov,
(num_random,) + mean.shape[:-1],
)
control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
return control
sqrtm_cov = sqrtm(cov)
if any([
basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
]):
# Use tessellated regular polyhedra vertices (and vec(0)) as control points.
if d != 3:
raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
base_shape, angular_tesselation = basis.split('_')
transform = geopoly.generate_basis(
base_shape, int(angular_tesselation), remove_symmetries=False
).T
transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
elif basis == 'julier':
# The most basic symmetric unscented transformation from the original paper,
# which yields 2*d+1 control points.
offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
control = jnp.concatenate(
[mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
)
elif basis == 'menegaz':
# A compact unscented transformation from
# folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
# which yields d+1 control points.
if d == 3:
# A hand-optimized version of the d==3 case.
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
offsets = jnp.concatenate(
[-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
)
control = mean_ex + jnp.moveaxis(offsets, -1, axis)
else:
transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
# == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
else:
raise ValueError(f'basis={basis} not implemented.')
return control
def compute_control_points(
means,
covs,
rays,
tdist,
rng,
unscented_mip_basis,
unscented_scale_mult,
):
"""Wrapper to compute unscented control points for the MLP class."""
if unscented_mip_basis == 'hexify':
control, perp_mag = hexify(
rng,
origins=rays.origins,
directions=rays.directions,
radii=rays.radii,
tdist=tdist,
)
else:
# Use a normal unscented transformation.
control = unscented_transform(
means,
covs,
basis=unscented_mip_basis,
axis=-2,
)
if unscented_scale_mult > 0:
if rays is None:
raise ValueError(
'Rays are required as input if unscented_scale_mult > 0.'
)
# Mimic the math used by hexify to produce comparable scales.
t_recon = jnp.sum(
(control - rays.origins[Ellipsis, None, None, :])
* rays.directions[Ellipsis, None, None, :],
axis=-1,
)
perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
else:
perp_mag = None
return control, perp_mag
<fim_middle>if mode == 'fast':
det = jnp.linalg.det(cov)
diag_val = det ** (1 / d)
is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
elif mode == 'accurate':
log_det = jnp.linalg.slogdet(cov)[1]
diag_val = jnp.exp(log_det / d)
is_invalid = ~jnp.isfinite(log_det)
else:
raise ValueError(f'mode={mode} not implemented.') | if mode == 'fast':
det = jnp.linalg.det(cov)
diag_val = det ** (1 / d)
is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
elif mode == 'accurate':
log_det = jnp.linalg.slogdet(cov)[1]
diag_val = jnp.exp(log_det / d)
is_invalid = ~jnp.isfinite(log_det)
else:
raise ValueError(f'mode={mode} not implemented.') | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/datasets.py
def run(self):
while True:
self._queue.put(self._next_fn())
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark populating_data as a variable that's outside of thread_fn.
# Otherwise, `populating_data = False` below would create a local variable.
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
<fim_suffix>
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>if not populating_data and results_queue.empty():
break | if not populating_data and results_queue.empty():
break | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |