import argparse
import os


def main():
    parser = argparse.ArgumentParser(description='Export the Napolab benchmark datasets as CSV')
    parser.add_argument('--output_path', type=str, default=os.getcwd(), help='The path where datasets will be saved. Default is the current directory.')
    # argparse's type=bool treats any non-empty string (including "False") as True,
    # so the flag value is parsed explicitly instead.
    parser.add_argument('--include_translations', type=lambda s: s.lower() not in ('false', '0', 'no'), default=True, help='Whether to include translated versions of the datasets. Defaults to True.')
args = parser.parse_args()
export_napolab_benchmark(args.output_path, args.include_translations)
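# Example invocation (a sketch; the script filename is an assumption, not from the source):
#
#     python export_napolab.py --output_path ./napolab_csv --include_translations false
#
# CSVs land under <output_path>/datasets and, when translations are enabled,
# under <output_path>/translations as well.
if __name__ == '__main__':
    main()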
|
class DatasetLoader:
    """
    A class responsible for loading the datasets of the Napolab benchmark and performing various preprocessing operations.

    Attributes:
        DATASET_NAMES (list): List of supported dataset names.
        SELECTED_COLUMNS (dict): Columns to select from datasets.
        RENAME_COLUMNS_KEY (dict): Columns to rename for each dataset.
        SUPPORTED_LANGUAGES (dict): Supported languages with their respective codes.
        SUPPORTED_VARIANTS (list): List of supported dataset variants.
        ASSIN_SPLIT_RANGES (dict): Number of items for each split in ASSIN datasets.
    """

    DATASET_NAMES = ['assin', 'assin2', 'rerelem', 'hatebr', 'reli-sa', 'faquad-nli', 'porsimplessent']
    SELECTED_COLUMNS = {
        'assin': ['premise', 'hypothesis', 'relatedness_score', 'entailment_judgment'],
        'assin2': ['premise', 'hypothesis', 'relatedness_score', 'entailment_judgment'],
        'rerelem': ['sentence1', 'sentence2', 'label'],
        'hatebr': ['instagram_comments', 'offensive_language'],
        'reli-sa': ['sentence', 'label'],
        'faquad-nli': ['question', 'answer', 'label'],
        'porsimplessent': ['sentence1', 'sentence2', 'label'],
    }
    RENAME_COLUMNS_KEY = {
        'assin': {'premise': 'sentence1', 'hypothesis': 'sentence2'},
        'assin2': {'premise': 'sentence1', 'hypothesis': 'sentence2'},
        'hatebr': {'instagram_comments': 'sentence1', 'offensive_language': 'label'},
        'reli-sa': {'sentence': 'sentence1'},
        'faquad-nli': {'question': 'sentence1', 'answer': 'sentence2'},
    }
    SUPPORTED_LANGUAGES = {'portuguese': 'por', 'english': 'eng', 'spanish': 'spa', 'catalan': 'cat', 'galician': 'glg'}
    SUPPORTED_VARIANTS = ['full', 'br', 'pt']
    ASSIN_SPLIT_RANGES = {'train': 2500, 'validation': 500, 'test': 2000}
def validate_parameters(self, dataset_name: str, language: str, variant: str) -> None:
        """
        Validate the provided parameters for loading datasets.

        Args:
            dataset_name (str): Name of the dataset.
            language (str): Language of the dataset.
            variant (str): Variant of the dataset.

        Raises:
            ValueError: If the dataset name, language, or variant is not supported.
        """
        if dataset_name not in self.DATASET_NAMES:
            raise ValueError(f'Dataset name must be one of {self.DATASET_NAMES}')
        if variant not in self.SUPPORTED_VARIANTS:
            raise ValueError(f'Variant must be one of {self.SUPPORTED_VARIANTS}')
        if language not in self.SUPPORTED_LANGUAGES and language not in self.SUPPORTED_LANGUAGES.values():
            raise ValueError(f'Language must be one of {list(self.SUPPORTED_LANGUAGES)} or {list(self.SUPPORTED_LANGUAGES.values())}')
def get_dataset_name(self, dataset_name: str, language: str) -> str:
        """
        Construct the dataset name based on dataset_name and language.

        Args:
            dataset_name (str): Name of the dataset.
            language (str): Language of the dataset.

        Returns:
            str: Complete dataset name.
        """
        if dataset_name.startswith('assin') and language in ['por', 'portuguese']:
            name = dataset_name
        else:
            name = f'ruanchaves/{dataset_name}'
        if language not in ['por', 'portuguese']:
            language_code = self.SUPPORTED_LANGUAGES.get(language, language)
            name = f'{name}_por_Latn_to_{language_code}_Latn'
        return name
def apply_variant_filter(self, dataset: DatasetDict, name: str, variant: str) -> DatasetDict:
        """
        Apply a variant filter to the dataset, especially for ASSIN datasets.

        Args:
            dataset (DatasetDict): The dataset to filter.
            name (str): Name of the dataset.
            variant (str): Desired dataset variant.

        Returns:
            DatasetDict: Filtered dataset.
        """
        # Translated ASSIN datasets are namespaced (e.g. 'ruanchaves/assin_por_Latn_to_eng_Latn'),
        # so match the 'assin_' infix rather than a prefix.
        if variant != 'full' and 'assin_' in name:
            for split in ['train', 'validation', 'test']:
                split_range = self.ASSIN_SPLIT_RANGES[split]
                if variant == 'br':
                    dataset[split] = dataset[split].select(range(split_range))
                elif variant == 'pt':
                    dataset[split] = dataset[split].select(range(split_range, split_range * 2))
        return dataset
    def process_assin(self, dataset: Dataset, task: str) -> Dataset:
        """
        Process ASSIN datasets based on the given task.

        Args:
            dataset (Dataset): Dataset to process.
            task (str): Task type - one of ['entailment', 'rte', 'similarity', 'sts'].

        Returns:
            Dataset: Processed dataset.

        Raises:
            ValueError: If an unsupported task is provided for ASSIN datasets.
        """
        if task in ['entailment', 'rte']:
            dataset = dataset.rename_column('entailment_judgment', 'label')
            dataset = dataset.remove_columns('relatedness_score')
        elif task in ['similarity', 'sts']:
            dataset = dataset.rename_column('relatedness_score', 'label')
            dataset = dataset.remove_columns('entailment_judgment')
        else:
            raise ValueError("'task' argument value must be one of ['entailment', 'rte', 'similarity', 'sts'] for datasets of type ['assin', 'assin2']")
        return dataset
def rename_dataset_columns(self, dataset: Dataset, dataset_name: str) -> Dataset:
        """
        Rename columns of the dataset based on the dataset name.

        Args:
            dataset (Dataset): Dataset with columns to rename.
            dataset_name (str): Name of the dataset.

        Returns:
            Dataset: Dataset with renamed columns.
        """
        if dataset_name in self.RENAME_COLUMNS_KEY:
            for old_name, new_name in self.RENAME_COLUMNS_KEY[dataset_name].items():
                dataset = dataset.rename_column(old_name, new_name)
return dataset
def clean_dataset(self, dataset: DatasetDict, dataset_name: str, task: str) -> DatasetDict:
        """
        Clean the dataset by selecting, renaming, and processing columns.

        Args:
            dataset (DatasetDict): Dataset to clean.
            dataset_name (str): Name of the dataset.
            task (str): Task type.

        Returns:
            DatasetDict: Cleaned dataset.
        """
        for split in ['train', 'validation', 'test']:
            drop_list = [column for column in dataset[split].features if column not in self.SELECTED_COLUMNS[dataset_name]]
            dataset[split] = dataset[split].remove_columns(drop_list)
            if dataset_name.startswith('assin'):
                dataset[split] = self.process_assin(dataset[split], task)
            dataset[split] = self.rename_dataset_columns(dataset[split], dataset_name)
        return dataset
    def load(self, dataset_name: str, language: str = 'por', variant: str = 'full', clean: bool = True, task: Optional[str] = None, hf_args: Optional[List] = None, hf_kwargs: Optional[Dict] = None) -> DatasetDict:
        """
        Load a dataset, optionally clean it, and return the processed dataset.

        Args:
            dataset_name (str): Name of the dataset to load.
            language (str, optional): Language of the dataset. Defaults to "por".
            variant (str, optional): Variant of the dataset. Defaults to "full". Relevant only if retrieving cleaned and translated ASSIN datasets.
            clean (bool, optional): Whether to clean the dataset, i.e. drop columns not relevant to the benchmark. Defaults to True.
            task (str, optional): Task type. Relevant only if retrieving cleaned ASSIN datasets.
            hf_args (list, optional): Positional arguments to pass to `datasets.load_dataset`. Defaults to an empty list.
            hf_kwargs (dict, optional): Keyword arguments to pass to `datasets.load_dataset`. Defaults to an empty dict.

        Returns:
            DatasetDict: Loaded (and optionally cleaned) dataset.
        """
        # Avoid mutable default arguments: fall back to fresh containers per call.
        hf_args = hf_args if hf_args is not None else []
        hf_kwargs = hf_kwargs if hf_kwargs is not None else {}
self.validate_parameters(dataset_name, language, variant)
name = self.get_dataset_name(dataset_name, language)
dataset = datasets.load_dataset(name, *hf_args, **hf_kwargs)
dataset = self.apply_variant_filter(dataset, name, variant)
if clean:
dataset = self.clean_dataset(dataset, dataset_name, task)
return dataset
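# A minimal usage sketch of DatasetLoader (illustrative, not part of the original
# source; it assumes network access to the Hugging Face Hub):
#
#     loader = DatasetLoader()
#     assin_rte = loader.load('assin', task='rte')           # cleaned Portuguese RTE
#     hatebr_en = loader.load('hatebr', language='english')  # machine-translated copy
#     print(assin_rte['train'][0])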
|
def load_napolab_benchmark(include_translations=True):
    """
    Load the Napolab benchmark datasets, and optionally their translations.

    Args:
        include_translations (bool): Determines if translated versions of the datasets should be
            loaded. Defaults to True.

    Returns:
        dict: A dictionary with two main keys:
            'datasets': A dictionary with dataset names as keys and loaded datasets as values.
            'translations': A dictionary with languages (e.g., 'english', 'spanish') as keys,
                and a nested dictionary with dataset names as keys and loaded datasets as values.
    """
loader = DatasetLoader()
datasets = {}
for dataset_name in loader.DATASET_NAMES:
if (dataset_name in ['assin', 'assin2']):
datasets[f'{dataset_name}-rte'] = loader.load(dataset_name, task='rte')
datasets[f'{dataset_name}-sts'] = loader.load(dataset_name, task='sts')
else:
datasets[dataset_name] = loader.load(dataset_name)
datasets['assin-rte-ptbr'] = loader.load('assin', task='rte', hf_args=['ptbr'])
datasets['assin-rte-ptpt'] = loader.load('assin', task='rte', hf_args=['ptpt'])
datasets['assin-sts-ptbr'] = loader.load('assin', task='sts', hf_args=['ptbr'])
datasets['assin-sts-ptpt'] = loader.load('assin', task='sts', hf_args=['ptpt'])
translated_datasets = {}
if include_translations:
for language in ['english', 'spanish', 'galician', 'catalan']:
if (language not in translated_datasets):
translated_datasets[language] = {}
for dataset_name in loader.DATASET_NAMES:
if (dataset_name in ['assin', 'assin2']):
translated_datasets[language][f'{dataset_name}-rte'] = loader.load(dataset_name, task='rte', language=language)
translated_datasets[language][f'{dataset_name}-sts'] = loader.load(dataset_name, task='sts', language=language)
if (dataset_name == 'assin'):
                        # Pass the language through so the br/pt variants are sliced from
                        # the translated dataset rather than the Portuguese original.
                        translated_datasets[language]['assin-rte-ptbr'] = loader.load('assin', task='rte', language=language, variant='br')
                        translated_datasets[language]['assin-rte-ptpt'] = loader.load('assin', task='rte', language=language, variant='pt')
                        translated_datasets[language]['assin-sts-ptbr'] = loader.load('assin', task='sts', language=language, variant='br')
                        translated_datasets[language]['assin-sts-ptpt'] = loader.load('assin', task='sts', language=language, variant='pt')
else:
translated_datasets[language][dataset_name] = loader.load(dataset_name, language=language)
output = {'datasets': datasets, 'translations': translated_datasets}
return output
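# Sketch of consuming the returned structure (illustrative; the keys follow the
# dictionaries built above):
#
#     benchmark = load_napolab_benchmark(include_translations=True)
#     for name, ds in benchmark['datasets'].items():
#         print(name, {split: len(ds[split]) for split in ds})
#     english_assin2_rte = benchmark['translations']['english']['assin2-rte']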
|
def export_napolab_benchmark(output_path, include_translations=True):
    """
    Load the Napolab benchmark datasets using load_napolab_benchmark and save each split of
    each dataset as CSV in a structured hierarchy of folders and subfolders.

    Args:
        output_path (str): The path where datasets will be saved.
        include_translations (bool): Determines if translated versions of the datasets should be
            saved. Defaults to True.
    """
data = load_napolab_benchmark(include_translations=include_translations)
os.makedirs(os.path.join(output_path, 'datasets'), exist_ok=True)
if include_translations:
os.makedirs(os.path.join(output_path, 'translations'), exist_ok=True)
for (dataset_name, dataset_obj) in data['datasets'].items():
for (split, split_data) in dataset_obj.items():
split_path = os.path.join(output_path, 'datasets', dataset_name, f'{split}.csv')
os.makedirs(os.path.dirname(split_path), exist_ok=True)
split_data.to_pandas().to_csv(split_path, index=False)
logging.info(f'Saved {split_path}')
if include_translations:
for (language, datasets) in data['translations'].items():
for (dataset_name, dataset_obj) in datasets.items():
for (split, split_data) in dataset_obj.items():
split_path = os.path.join(output_path, 'translations', language, dataset_name, f'{split}.csv')
os.makedirs(os.path.dirname(split_path), exist_ok=True)
split_data.to_pandas().to_csv(split_path, index=False)
logging.info(f'Saved {split_path}')
|
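# The tests below assume a module-level DatasetLoader instance (the fixture is not
# shown in the source); a minimal sketch of the missing piece:
loader = DatasetLoader()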
def test_validate_parameters_invalid_dataset_name():
with pytest.raises(ValueError):
loader.validate_parameters('invalid_dataset_name', 'portuguese', 'full')
|
def test_validate_parameters_invalid_language():
with pytest.raises(ValueError):
loader.validate_parameters('assin', 'invalid_language', 'full')
|
def test_validate_parameters_invalid_variant():
with pytest.raises(ValueError):
loader.validate_parameters('assin', 'portuguese', 'invalid_variant')
|
def test_get_dataset_name_non_assin():
assert (loader.get_dataset_name('rerelem', 'english') == 'ruanchaves/rerelem_por_Latn_to_eng_Latn')
|
def test_get_dataset_name_assin():
assert (loader.get_dataset_name('assin', 'portuguese') == 'assin')
|
def test_get_dataset_name_assin_other_language():
assert (loader.get_dataset_name('assin', 'english') == 'ruanchaves/assin_por_Latn_to_eng_Latn')
|
def load_config():
    # Use a context manager so the config file handle is closed deterministically.
    with open(Path(__file__).parent / 'config.yml', 'r') as f:
        return yaml.load(f, Loader=yaml.FullLoader)
|
def check_os_environ(key, use):
if (key not in os.environ):
raise ValueError(f'{key} is not defined in the os variables, it is required for {use}.')
|
def dataset_dir():
check_os_environ('DATASET', 'data loading')
return os.environ['DATASET']
|
class ADE20KSegmentation(BaseMMSeg):
def __init__(self, image_size, crop_size, split, **kwargs):
super().__init__(image_size, crop_size, split, ADE20K_CONFIG_PATH, **kwargs)
(self.names, self.colors) = utils.dataset_cat_description(ADE20K_CATS_PATH)
self.n_cls = 150
self.ignore_label = 0
self.reduce_zero_label = True
def update_default_config(self, config):
root_dir = dataset_dir()
path = (Path(root_dir) / 'ade20k')
config.data_root = path
if (self.split == 'train'):
config.data.train.data_root = (path / 'ADEChallengeData2016')
elif (self.split == 'trainval'):
config.data.trainval.data_root = (path / 'ADEChallengeData2016')
elif (self.split == 'val'):
config.data.val.data_root = (path / 'ADEChallengeData2016')
elif (self.split == 'test'):
config.data.test.data_root = (path / 'release_test')
config = super().update_default_config(config)
return config
def test_post_process(self, labels):
return (labels + 1)
|
class BaseMMSeg(Dataset):
def __init__(self, image_size, crop_size, split, config_path, normalization, **kwargs):
super().__init__()
self.image_size = image_size
self.crop_size = crop_size
self.split = split
self.normalization = STATS[normalization].copy()
self.ignore_label = None
for (k, v) in self.normalization.items():
v = np.round((255 * np.array(v)), 2)
self.normalization[k] = tuple(v)
print(f'Use normalization: {self.normalization}')
config = Config.fromfile(config_path)
self.ratio = config.max_ratio
self.dataset = None
self.config = self.update_default_config(config)
self.dataset = build_dataset(getattr(self.config.data, f'{self.split}'))
def update_default_config(self, config):
train_splits = ['train', 'trainval']
if (self.split in train_splits):
            config_pipeline = config.train_pipeline
else:
config_pipeline = getattr(config, f'{self.split}_pipeline')
img_scale = ((self.ratio * self.image_size), self.image_size)
if (self.split not in train_splits):
assert (config_pipeline[1]['type'] == 'MultiScaleFlipAug')
config_pipeline = config_pipeline[1]['transforms']
for (i, op) in enumerate(config_pipeline):
op_type = op['type']
if (op_type == 'Resize'):
op['img_scale'] = img_scale
elif (op_type == 'RandomCrop'):
op['crop_size'] = (self.crop_size, self.crop_size)
elif (op_type == 'Normalize'):
op['mean'] = self.normalization['mean']
op['std'] = self.normalization['std']
elif (op_type == 'Pad'):
op['size'] = (self.crop_size, self.crop_size)
config_pipeline[i] = op
if (self.split == 'train'):
config.data.train.pipeline = config_pipeline
elif (self.split == 'trainval'):
config.data.trainval.pipeline = config_pipeline
elif (self.split == 'val'):
config.data.val.pipeline[1]['img_scale'] = img_scale
config.data.val.pipeline[1]['transforms'] = config_pipeline
elif (self.split == 'test'):
config.data.test.pipeline[1]['img_scale'] = img_scale
config.data.test.pipeline[1]['transforms'] = config_pipeline
config.data.test.test_mode = True
else:
raise ValueError(f'Unknown split: {self.split}')
return config
def set_multiscale_mode(self):
self.config.data.val.pipeline[1]['img_ratios'] = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
self.config.data.val.pipeline[1]['flip'] = True
self.config.data.test.pipeline[1]['img_ratios'] = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
self.config.data.test.pipeline[1]['flip'] = True
self.dataset = build_dataset(getattr(self.config.data, f'{self.split}'))
def __getitem__(self, idx):
data = self.dataset[idx]
train_splits = ['train', 'trainval']
if (self.split in train_splits):
im = data['img'].data
seg = data['gt_semantic_seg'].data.squeeze(0)
else:
im = [im.data for im in data['img']]
seg = None
out = dict(im=im)
if (self.split in train_splits):
out['segmentation'] = seg
else:
im_metas = [meta.data for meta in data['img_metas']]
out['im_metas'] = im_metas
out['colors'] = self.colors
return out
def get_gt_seg_maps(self):
dataset = self.dataset
gt_seg_maps = {}
for img_info in dataset.img_infos:
seg_map = (Path(dataset.ann_dir) / img_info['ann']['seg_map'])
gt_seg_map = mmcv.imread(seg_map, flag='unchanged', backend='pillow')
gt_seg_map[(gt_seg_map == self.ignore_label)] = IGNORE_LABEL
if self.reduce_zero_label:
gt_seg_map[(gt_seg_map != IGNORE_LABEL)] -= 1
gt_seg_maps[img_info['filename']] = gt_seg_map
return gt_seg_maps
def __len__(self):
return len(self.dataset)
@property
def unwrapped(self):
return self
def set_epoch(self, epoch):
pass
def get_diagnostics(self, logger):
pass
def get_snapshot(self):
return {}
def end_epoch(self, epoch):
return
|
class CityscapesDataset(BaseMMSeg):
def __init__(self, image_size, crop_size, split, **kwargs):
super().__init__(image_size, crop_size, split, CITYSCAPES_CONFIG_PATH, **kwargs)
(self.names, self.colors) = utils.dataset_cat_description(CITYSCAPES_CATS_PATH)
self.n_cls = 19
self.ignore_label = 255
self.reduce_zero_label = False
def update_default_config(self, config):
root_dir = dataset_dir()
path = (Path(root_dir) / 'cityscapes')
config.data_root = path
config.data[self.split]['data_root'] = path
config = super().update_default_config(config)
return config
def test_post_process(self, labels):
labels_copy = np.copy(labels)
cats = np.unique(labels_copy)
for cat in cats:
labels_copy[(labels == cat)] = CSLabels.trainId2label[cat].id
return labels_copy
|
def create_dataset(dataset_kwargs):
dataset_kwargs = dataset_kwargs.copy()
dataset_name = dataset_kwargs.pop('dataset')
batch_size = dataset_kwargs.pop('batch_size')
num_workers = dataset_kwargs.pop('num_workers')
split = dataset_kwargs.pop('split')
if (dataset_name == 'imagenet'):
dataset_kwargs.pop('patch_size')
dataset = ImagenetDataset(split=split, **dataset_kwargs)
elif (dataset_name == 'ade20k'):
dataset = ADE20KSegmentation(split=split, **dataset_kwargs)
elif (dataset_name == 'pascal_context'):
dataset = PascalContextDataset(split=split, **dataset_kwargs)
elif (dataset_name == 'cityscapes'):
dataset = CityscapesDataset(split=split, **dataset_kwargs)
else:
raise ValueError(f'Dataset {dataset_name} is unknown.')
dataset = Loader(dataset=dataset, batch_size=batch_size, num_workers=num_workers, distributed=ptu.distributed, split=split)
return dataset
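# A usage sketch for create_dataset (illustrative; the values mirror the kwargs the
# training script assembles below, and the DATASET environment variable must point
# at the data root):
#
#     kwargs = dict(dataset='ade20k', image_size=512, crop_size=512,
#                   batch_size=8, num_workers=10, split='train', normalization='vit')
#     train_loader = create_dataset(kwargs)
#     print(len(train_loader), train_loader.unwrapped.n_cls)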
|
class ImagenetDataset(Dataset):
def __init__(self, root_dir, image_size=224, crop_size=224, split='train', normalization='vit'):
super().__init__()
assert (image_size[0] == image_size[1])
self.path = (Path(root_dir) / split)
self.crop_size = crop_size
self.image_size = image_size
self.split = split
self.normalization = normalization
if (split == 'train'):
self.transform = transforms.Compose([transforms.RandomResizedCrop(self.crop_size, interpolation=3), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
else:
self.transform = transforms.Compose([transforms.Resize((image_size[0] + 32), interpolation=3), transforms.CenterCrop(self.crop_size), transforms.ToTensor()])
self.base_dataset = datasets.ImageFolder(self.path, self.transform)
self.n_cls = 1000
@property
def unwrapped(self):
return self
def __len__(self):
return len(self.base_dataset)
def __getitem__(self, idx):
(im, target) = self.base_dataset[idx]
im = utils.rgb_normalize(im, self.normalization)
return dict(im=im, target=target)
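# Direct-instantiation sketch for evaluation (illustrative path; expects an
# ImageNet-style folder layout under root_dir/val):
#
#     ds = ImagenetDataset(root_dir='/data/imagenet', image_size=(224, 224),
#                          crop_size=224, split='val', normalization=STATS['vit'])
#     sample = ds[0]  # dict with 'im' (normalized tensor) and 'target' (class id)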
|
class Loader(DataLoader):
def __init__(self, dataset, batch_size, num_workers, distributed, split):
if distributed:
sampler = DistributedSampler(dataset, shuffle=True)
super().__init__(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True, sampler=sampler)
else:
super().__init__(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True)
self.base_dataset = self.dataset
@property
def unwrapped(self):
return self.base_dataset.unwrapped
def set_epoch(self, epoch):
if isinstance(self.sampler, DistributedSampler):
self.sampler.set_epoch(epoch)
def get_diagnostics(self, logger):
return self.base_dataset.get_diagnostics(logger)
def get_snapshot(self):
return self.base_dataset.get_snapshot()
def end_epoch(self, epoch):
return self.base_dataset.end_epoch(epoch)
|
class PascalContextDataset(BaseMMSeg):
def __init__(self, image_size, crop_size, split, **kwargs):
super().__init__(image_size, crop_size, split, PASCAL_CONTEXT_CONFIG_PATH, **kwargs)
(self.names, self.colors) = utils.dataset_cat_description(PASCAL_CONTEXT_CATS_PATH)
self.n_cls = 60
self.ignore_label = 255
self.reduce_zero_label = False
def update_default_config(self, config):
root_dir = dataset_dir()
path = (Path(root_dir) / 'pcontext')
config.data_root = path
if (self.split == 'train'):
config.data.train.data_root = (path / 'VOCdevkit/VOC2010/')
elif (self.split == 'val'):
config.data.val.data_root = (path / 'VOCdevkit/VOC2010/')
elif (self.split == 'test'):
raise ValueError('Test split is not valid for Pascal Context dataset')
config = super().update_default_config(config)
return config
def test_post_process(self, labels):
return labels
|
def train_one_epoch(model, data_loader, optimizer, lr_scheduler, epoch, amp_autocast, loss_scaler):
criterion = torch.nn.CrossEntropyLoss(ignore_index=IGNORE_LABEL)
logger = MetricLogger(delimiter=' ')
header = f'Epoch: [{epoch}]'
print_freq = 100
model.train()
data_loader.set_epoch(epoch)
num_updates = (epoch * len(data_loader))
for batch in logger.log_every(data_loader, print_freq, header):
im = batch['im'].to(ptu.device)
seg_gt = batch['segmentation'].long().to(ptu.device)
with amp_autocast():
seg_pred = model.forward(im)
loss = criterion(seg_pred, seg_gt)
loss_value = loss.item()
        if not math.isfinite(loss_value):
            print(f'Loss is {loss_value}, stopping training', force=True)
            # Abort the run: continuing after a NaN/inf loss only corrupts the weights.
            sys.exit(1)
optimizer.zero_grad()
if (loss_scaler is not None):
loss_scaler(loss, optimizer, parameters=model.parameters())
else:
loss.backward()
optimizer.step()
num_updates += 1
lr_scheduler.step_update(num_updates=num_updates)
torch.cuda.synchronize()
logger.update(loss=loss.item(), learning_rate=optimizer.param_groups[0]['lr'])
return logger
|
@torch.no_grad()
def evaluate(model, data_loader, val_seg_gt, window_size, window_stride, amp_autocast):
model_without_ddp = model
if hasattr(model, 'module'):
model_without_ddp = model.module
logger = MetricLogger(delimiter=' ')
header = 'Eval:'
print_freq = 50
val_seg_pred = {}
model.eval()
for batch in logger.log_every(data_loader, print_freq, header):
ims = [im.to(ptu.device) for im in batch['im']]
ims_metas = batch['im_metas']
ori_shape = ims_metas[0]['ori_shape']
ori_shape = (ori_shape[0].item(), ori_shape[1].item())
filename = batch['im_metas'][0]['ori_filename'][0]
with amp_autocast():
seg_pred = utils.inference(model_without_ddp, ims, ims_metas, ori_shape, window_size, window_stride, batch_size=1)
seg_pred = seg_pred.argmax(0)
seg_pred = seg_pred.cpu().numpy()
val_seg_pred[filename] = seg_pred
val_seg_pred = gather_data(val_seg_pred)
scores = compute_metrics(val_seg_pred, val_seg_gt, data_loader.unwrapped.n_cls, ignore_index=IGNORE_LABEL, distributed=ptu.distributed)
for (k, v) in scores.items():
logger.update(**{f'{k}': v, 'n': 1})
return logger
|
def compute_labels(model, batch):
im = batch['im']
target = batch['target']
with torch.no_grad():
with torch.cuda.amp.autocast():
output = model.forward(im)
(acc1, acc5) = accuracy(output, target, topk=(1, 5))
return (acc1.item(), acc5.item())
|
def eval_dataset(model, dataset_kwargs):
db = create_dataset(dataset_kwargs)
print_freq = 20
header = ''
logger = MetricLogger(delimiter=' ')
for batch in logger.log_every(db, print_freq, header):
for (k, v) in batch.items():
batch[k] = v.to(ptu.device)
(acc1, acc5) = compute_labels(model, batch)
batch_size = batch['im'].size(0)
logger.update(acc1=acc1, n=batch_size)
logger.update(acc5=acc5, n=batch_size)
print(f'Imagenet accuracy: {logger}')
|
@click.command()
@click.argument('backbone', type=str)
@click.option('--imagenet-dir', type=str)
@click.option('-bs', '--batch-size', default=32, type=int)
@click.option('-nw', '--num-workers', default=10, type=int)
@click.option('-gpu', '--gpu/--no-gpu', default=True, is_flag=True)
def main(backbone, imagenet_dir, batch_size, num_workers, gpu):
ptu.set_gpu_mode(gpu)
cfg = config.load_config()
cfg = cfg['model'][backbone]
cfg['backbone'] = backbone
cfg['image_size'] = (cfg['image_size'], cfg['image_size'])
dataset_kwargs = dict(dataset='imagenet', root_dir=imagenet_dir, image_size=cfg['image_size'], crop_size=cfg['image_size'], patch_size=cfg['patch_size'], batch_size=batch_size, num_workers=num_workers, split='val', normalization=STATS[cfg['normalization']])
model = create_vit(cfg)
model.to(ptu.device)
model.eval()
eval_dataset(model, dataset_kwargs)
|
def blend_im(im, seg, alpha=0.5):
pil_im = Image.fromarray(im)
pil_seg = Image.fromarray(seg)
im_blend = Image.blend(pil_im, pil_seg, alpha).convert('RGB')
return np.asarray(im_blend)
|
def save_im(save_dir, save_name, im, seg_pred, seg_gt, colors, blend, normalization):
seg_rgb = seg_to_rgb(seg_gt[None], colors)
pred_rgb = seg_to_rgb(seg_pred[None], colors)
im_unnorm = rgb_denormalize(im, normalization)
save_dir = Path(save_dir)
im_uint = im_unnorm.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
seg_rgb_uint = (255 * seg_rgb.cpu().numpy()).astype(np.uint8)
seg_pred_uint = (255 * pred_rgb.cpu().numpy()).astype(np.uint8)
for i in range(pred_rgb.shape[0]):
if blend:
blend_pred = blend_im(im_uint[i], seg_pred_uint[i])
blend_gt = blend_im(im_uint[i], seg_rgb_uint[i])
ims = (im_uint[i], blend_pred, blend_gt)
else:
ims = (im_uint[i], seg_pred_uint[i], seg_rgb_uint[i])
for (im, im_dir) in zip(ims, ((save_dir / 'input'), (save_dir / 'pred'), (save_dir / 'gt'))):
pil_out = Image.fromarray(im)
im_dir.mkdir(exist_ok=True)
pil_out.save((im_dir / save_name))
|
def process_batch(model, batch, window_size, window_stride, window_batch_size):
ims = batch['im']
ims_metas = batch['im_metas']
ori_shape = ims_metas[0]['ori_shape']
ori_shape = (ori_shape[0].item(), ori_shape[1].item())
filename = batch['im_metas'][0]['ori_filename'][0]
model_without_ddp = model
if ptu.distributed:
model_without_ddp = model.module
seg_pred = inference(model_without_ddp, ims, ims_metas, ori_shape, window_size, window_stride, window_batch_size)
seg_pred = seg_pred.argmax(0)
im = F.interpolate(ims[(- 1)], ori_shape, mode='bilinear')
return (filename, im.cpu(), seg_pred.cpu())
|
def eval_dataset(model, multiscale, model_dir, blend, window_size, window_stride, window_batch_size, save_images, frac_dataset, dataset_kwargs):
db = create_dataset(dataset_kwargs)
normalization = db.dataset.normalization
dataset_name = dataset_kwargs['dataset']
im_size = dataset_kwargs['image_size']
cat_names = db.base_dataset.names
n_cls = db.unwrapped.n_cls
if multiscale:
db.dataset.set_multiscale_mode()
logger = MetricLogger(delimiter=' ')
header = ''
print_freq = 50
ims = {}
seg_pred_maps = {}
idx = 0
for batch in logger.log_every(db, print_freq, header):
colors = batch['colors']
(filename, im, seg_pred) = process_batch(model, batch, window_size, window_stride, window_batch_size)
ims[filename] = im
seg_pred_maps[filename] = seg_pred
idx += 1
if (idx > (len(db) * frac_dataset)):
break
seg_gt_maps = db.dataset.get_gt_seg_maps()
if save_images:
save_dir = (model_dir / 'images')
if (ptu.dist_rank == 0):
if save_dir.exists():
shutil.rmtree(save_dir)
save_dir.mkdir()
if ptu.distributed:
torch.distributed.barrier()
for name in sorted(ims):
instance_dir = save_dir
filename = name
if (dataset_name == 'cityscapes'):
filename_list = name.split('/')
instance_dir = (instance_dir / filename_list[0])
filename = filename_list[(- 1)]
if (not instance_dir.exists()):
instance_dir.mkdir()
save_im(instance_dir, filename, ims[name], seg_pred_maps[name], torch.tensor(seg_gt_maps[name]), colors, blend, normalization)
if (ptu.dist_rank == 0):
shutil.make_archive(save_dir, 'zip', save_dir)
print(f'Saved eval images in {save_dir}.zip')
if ptu.distributed:
torch.distributed.barrier()
seg_pred_maps = gather_data(seg_pred_maps)
scores = compute_metrics(seg_pred_maps, seg_gt_maps, n_cls, ignore_index=IGNORE_LABEL, ret_cat_iou=True, distributed=ptu.distributed)
if (ptu.dist_rank == 0):
scores['inference'] = ('single_scale' if (not multiscale) else 'multi_scale')
suffix = ('ss' if (not multiscale) else 'ms')
scores['cat_iou'] = np.round((100 * scores['cat_iou']), 2).tolist()
for (k, v) in scores.items():
if ((k != 'cat_iou') and (k != 'inference')):
scores[k] = v.item()
if (k != 'cat_iou'):
print(f'{k}: {scores[k]}')
scores_str = yaml.dump(scores)
with open((model_dir / f'scores_{suffix}.yml'), 'w') as f:
f.write(scores_str)
|
@click.command()
@click.argument('model_path', type=str)
@click.argument('dataset_name', type=str)
@click.option('--im-size', default=None, type=int)
@click.option('--multiscale/--singlescale', default=False, is_flag=True)
@click.option('--blend/--no-blend', default=True, is_flag=True)
@click.option('--window-size', default=None, type=int)
@click.option('--window-stride', default=None, type=int)
@click.option('--window-batch-size', default=4, type=int)
@click.option('--save-images/--no-save-images', default=False, is_flag=True)
@click.option('-frac-dataset', '--frac-dataset', default=1.0, type=float)
def main(model_path, dataset_name, im_size, multiscale, blend, window_size, window_stride, window_batch_size, save_images, frac_dataset):
model_dir = Path(model_path).parent
ptu.set_gpu_mode(True)
distributed.init_process()
(model, variant) = load_model(model_path)
patch_size = model.patch_size
model.eval()
model.to(ptu.device)
if ptu.distributed:
model = DDP(model, device_ids=[ptu.device], find_unused_parameters=True)
cfg = config.load_config()
dataset_cfg = cfg['dataset'][dataset_name]
normalization = variant['dataset_kwargs']['normalization']
if (im_size is None):
im_size = dataset_cfg.get('im_size', variant['dataset_kwargs']['image_size'])
if (window_size is None):
window_size = variant['dataset_kwargs']['crop_size']
if (window_stride is None):
window_stride = (variant['dataset_kwargs']['crop_size'] - 32)
dataset_kwargs = dict(dataset=dataset_name, image_size=im_size, crop_size=im_size, patch_size=patch_size, batch_size=1, num_workers=10, split='val', normalization=normalization, crop=False, rep_aug=False)
eval_dataset(model, multiscale, model_dir, blend, window_size, window_stride, window_batch_size, save_images, frac_dataset, dataset_kwargs)
distributed.barrier()
distributed.destroy_process()
    # Exit with status 0 on successful completion.
    sys.exit(0)
|
@click.command()
@click.option('--model-path', type=str)
@click.option('--input-dir', '-i', type=str, help='folder with input images')
@click.option('--output-dir', '-o', type=str, help='folder with output images')
@click.option('--gpu/--cpu', default=True, is_flag=True)
def main(model_path, input_dir, output_dir, gpu):
ptu.set_gpu_mode(gpu)
model_dir = Path(model_path).parent
(model, variant) = load_model(model_path)
model.to(ptu.device)
normalization_name = variant['dataset_kwargs']['normalization']
normalization = STATS[normalization_name]
(cat_names, cat_colors) = dataset_cat_description(ADE20K_CATS_PATH)
input_dir = Path(input_dir)
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True)
list_dir = list(input_dir.iterdir())
for filename in tqdm(list_dir, ncols=80):
pil_im = Image.open(filename).copy()
im = (F.pil_to_tensor(pil_im).float() / 255)
im = F.normalize(im, normalization['mean'], normalization['std'])
im = im.to(ptu.device).unsqueeze(0)
im_meta = dict(flip=False)
logits = inference(model, [im], [im_meta], ori_shape=im.shape[2:4], window_size=variant['inference_kwargs']['window_size'], window_stride=variant['inference_kwargs']['window_stride'], batch_size=2)
seg_map = logits.argmax(0, keepdim=True)
seg_rgb = seg_to_rgb(seg_map, cat_colors)
seg_rgb = (255 * seg_rgb.cpu().numpy()).astype(np.uint8)
pil_seg = Image.fromarray(seg_rgb[0])
pil_blend = Image.blend(pil_im, pil_seg, 0.5).convert('RGB')
pil_blend.save((output_dir / filename.name))
|
def accuracy(output, target, topk=(1,)):
    """
    https://github.com/pytorch/examples/blob/master/imagenet/main.py
    Computes the accuracy over the k top predictions for the specified values of k
    """
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape((- 1)).float().sum(0, keepdim=True)
correct_k /= batch_size
res.append(correct_k)
return res
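# A small self-contained check of accuracy() (a sketch with synthetic logits; note
# that this variant returns fractions, not percentages):
def _demo_accuracy():
    logits = torch.tensor([[0.1, 0.9, 0.0],   # predicts class 1
                           [0.8, 0.1, 0.1]])  # predicts class 0
    target = torch.tensor([1, 2])
    top1, top3 = accuracy(logits, target, topk=(1, 3))
    print(top1.item(), top3.item())  # 0.5 (one of two correct), 1.0 (both within top-3)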
|
def gather_data(seg_pred, tmp_dir=None):
    """
    distributed data gathering
    prediction and ground truth are stored in a common tmp directory
    and loaded on the master node to compute metrics
    """
if (tmp_dir is None):
tmpprefix = os.path.expandvars('$DATASET/temp')
else:
tmpprefix = os.path.expandvars(tmp_dir)
MAX_LEN = 512
dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device=ptu.device)
if (ptu.dist_rank == 0):
tmpdir = tempfile.mkdtemp(prefix=tmpprefix)
tmpdir = torch.tensor(bytearray(tmpdir.encode()), dtype=torch.uint8, device=ptu.device)
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
tmpdir = Path(tmpdir)
    # Save results in a temp file and load them on the main process.
    tmp_file = tmpdir / f'part_{ptu.dist_rank}.pkl'
    with open(tmp_file, 'wb') as f:
        pkl.dump(seg_pred, f)
dist.barrier()
seg_pred = {}
if (ptu.dist_rank == 0):
for i in range(ptu.world_size):
            with open(tmpdir / f'part_{i}.pkl', 'rb') as f:
                part_seg_pred = pkl.load(f)
seg_pred.update(part_seg_pred)
shutil.rmtree(tmpdir)
return seg_pred
|
def compute_metrics(seg_pred, seg_gt, n_cls, ignore_index=None, ret_cat_iou=False, tmp_dir=None, distributed=False):
ret_metrics_mean = torch.zeros(3, dtype=float, device=ptu.device)
if (ptu.dist_rank == 0):
list_seg_pred = []
list_seg_gt = []
keys = sorted(seg_pred.keys())
for k in keys:
list_seg_pred.append(np.asarray(seg_pred[k]))
list_seg_gt.append(np.asarray(seg_gt[k]))
ret_metrics = mean_iou(results=list_seg_pred, gt_seg_maps=list_seg_gt, num_classes=n_cls, ignore_index=ignore_index)
ret_metrics = [ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics['IoU']]
        # np.float was removed in NumPy 1.24; the builtin float is equivalent here.
        ret_metrics_mean = torch.tensor([np.round(np.nanmean(ret_metric.astype(float)) * 100, 2) for ret_metric in ret_metrics], dtype=float, device=ptu.device)
cat_iou = ret_metrics[2]
if distributed:
dist.broadcast(ret_metrics_mean, 0)
(pix_acc, mean_acc, miou) = ret_metrics_mean
ret = dict(pixel_accuracy=pix_acc, mean_accuracy=mean_acc, mean_iou=miou)
if (ret_cat_iou and (ptu.dist_rank == 0)):
ret['cat_iou'] = cat_iou
return ret
|
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout, out_dim=None):
super().__init__()
self.fc1 = nn.Linear(dim, hidden_dim)
self.act = nn.GELU()
if (out_dim is None):
out_dim = dim
self.fc2 = nn.Linear(hidden_dim, out_dim)
self.drop = nn.Dropout(dropout)
@property
def unwrapped(self):
return self
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
|
class Attention(nn.Module):
def __init__(self, dim, heads, dropout):
super().__init__()
self.heads = heads
head_dim = (dim // heads)
self.scale = (head_dim ** (- 0.5))
self.attn = None
self.qkv = nn.Linear(dim, (dim * 3))
self.attn_drop = nn.Dropout(dropout)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(dropout)
@property
def unwrapped(self):
return self
def forward(self, x, mask=None):
(B, N, C) = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.heads, (C // self.heads)).permute(2, 0, 3, 1, 4)
(q, k, v) = (qkv[0], qkv[1], qkv[2])
attn = ((q @ k.transpose((- 2), (- 1))) * self.scale)
attn = attn.softmax(dim=(- 1))
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return (x, attn)
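# Shape sanity check for Attention (a sketch; sizes are arbitrary):
def _demo_attention():
    attn_layer = Attention(dim=64, heads=8, dropout=0.0)
    x = torch.randn(2, 16, 64)           # (batch, tokens, dim)
    out, attn = attn_layer(x)
    assert out.shape == (2, 16, 64)      # output keeps the input shape
    assert attn.shape == (2, 8, 16, 16)  # one (tokens x tokens) map per head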
|
class Block(nn.Module):
def __init__(self, dim, heads, mlp_dim, dropout, drop_path):
super().__init__()
self.norm1 = nn.LayerNorm(dim)
self.norm2 = nn.LayerNorm(dim)
self.attn = Attention(dim, heads, dropout)
self.mlp = FeedForward(dim, mlp_dim, dropout)
self.drop_path = (DropPath(drop_path) if (drop_path > 0.0) else nn.Identity())
def forward(self, x, mask=None, return_attention=False):
(y, attn) = self.attn(self.norm1(x), mask)
if return_attention:
return attn
x = (x + self.drop_path(y))
x = (x + self.drop_path(self.mlp(self.norm2(x))))
return x
|
@register_model
def vit_base_patch8_384(pretrained=False, **kwargs):
    """ViT-Base model (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
    """
model_kwargs = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch8_384', pretrained=pretrained, default_cfg=dict(url='', input_size=(3, 384, 384), mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=1000), **model_kwargs)
return model
|
def create_vit(model_cfg):
model_cfg = model_cfg.copy()
backbone = model_cfg.pop('backbone')
normalization = model_cfg.pop('normalization')
model_cfg['n_cls'] = 1000
mlp_expansion_ratio = 4
model_cfg['d_ff'] = (mlp_expansion_ratio * model_cfg['d_model'])
if (backbone in default_cfgs):
default_cfg = default_cfgs[backbone]
else:
default_cfg = dict(pretrained=False, num_classes=1000, drop_rate=0.0, drop_path_rate=0.0, drop_block_rate=None)
default_cfg['input_size'] = (3, model_cfg['image_size'][0], model_cfg['image_size'][1])
model = VisionTransformer(**model_cfg)
if (backbone == 'vit_base_patch8_384'):
path = os.path.expandvars('$TORCH_HOME/hub/checkpoints/vit_base_patch8_384.pth')
state_dict = torch.load(path, map_location='cpu')
filtered_dict = checkpoint_filter_fn(state_dict, model)
model.load_state_dict(filtered_dict, strict=True)
elif ('deit' in backbone):
load_pretrained(model, default_cfg, filter_fn=checkpoint_filter_fn)
else:
load_custom_pretrained(model, default_cfg)
return model
|
def create_decoder(encoder, decoder_cfg):
decoder_cfg = decoder_cfg.copy()
name = decoder_cfg.pop('name')
decoder_cfg['d_encoder'] = encoder.d_model
decoder_cfg['patch_size'] = encoder.patch_size
if ('linear' in name):
decoder = DecoderLinear(**decoder_cfg)
elif (name == 'mask_transformer'):
dim = encoder.d_model
n_heads = (dim // 64)
decoder_cfg['n_heads'] = n_heads
decoder_cfg['d_model'] = dim
decoder_cfg['d_ff'] = (4 * dim)
decoder = MaskTransformer(**decoder_cfg)
else:
raise ValueError(f'Unknown decoder: {name}')
return decoder
|
def create_segmenter(model_cfg):
model_cfg = model_cfg.copy()
decoder_cfg = model_cfg.pop('decoder')
decoder_cfg['n_cls'] = model_cfg['n_cls']
encoder = create_vit(model_cfg)
decoder = create_decoder(encoder, decoder_cfg)
model = Segmenter(encoder, decoder, n_cls=model_cfg['n_cls'])
return model
|
def load_model(model_path):
variant_path = (Path(model_path).parent / 'variant.yml')
with open(variant_path, 'r') as f:
variant = yaml.load(f, Loader=yaml.FullLoader)
net_kwargs = variant['net_kwargs']
model = create_segmenter(net_kwargs)
data = torch.load(model_path, map_location=ptu.device)
checkpoint = data['model']
model.load_state_dict(checkpoint, strict=True)
return (model, variant)
|
def create_scheduler(opt_args, optimizer):
if (opt_args.sched == 'polynomial'):
lr_scheduler = PolynomialLR(optimizer, opt_args.poly_step_size, opt_args.iter_warmup, opt_args.iter_max, opt_args.poly_power, opt_args.min_lr)
else:
(lr_scheduler, _) = scheduler.create_scheduler(opt_args, optimizer)
return lr_scheduler
|
def create_optimizer(opt_args, model):
return optim.create_optimizer(opt_args, model)
|
class PolynomialLR(_LRScheduler):
def __init__(self, optimizer, step_size, iter_warmup, iter_max, power, min_lr=0, last_epoch=(- 1)):
self.step_size = step_size
self.iter_warmup = int(iter_warmup)
self.iter_max = int(iter_max)
self.power = power
self.min_lr = min_lr
super(PolynomialLR, self).__init__(optimizer, last_epoch)
def polynomial_decay(self, lr):
iter_cur = float(self.last_epoch)
if (iter_cur < self.iter_warmup):
coef = (iter_cur / self.iter_warmup)
coef *= ((1 - (self.iter_warmup / self.iter_max)) ** self.power)
else:
coef = ((1 - (iter_cur / self.iter_max)) ** self.power)
return (((lr - self.min_lr) * coef) + self.min_lr)
def get_lr(self):
if ((self.last_epoch == 0) or ((self.last_epoch % self.step_size) != 0) or (self.last_epoch > self.iter_max)):
return [group['lr'] for group in self.optimizer.param_groups]
return [self.polynomial_decay(lr) for lr in self.base_lrs]
def step_update(self, num_updates):
self.step()
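# A sketch of the decay curve PolynomialLR produces (illustrative values only):
def _demo_polynomial_lr():
    model = torch.nn.Linear(4, 4)
    opt = torch.optim.SGD(model.parameters(), lr=0.01)
    sched = PolynomialLR(opt, step_size=1, iter_warmup=0, iter_max=100, power=0.9)
    for it in range(3):
        opt.step()
        sched.step_update(num_updates=it + 1)
        # lr decays as (1 - iter / iter_max) ** power towards min_lr
        print(opt.param_groups[0]['lr'])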
|
def download_ade(path, overwrite=False):
    _AUG_DOWNLOAD_URLS = [
        ('http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip', '219e1696abb36c8ba3a3afe7fb2f4b4606a897c7'),
        ('http://data.csail.mit.edu/places/ADEchallenge/release_test.zip', 'e05747892219d10e9243933371a497e905a4860c'),
    ]
download_dir = (path / 'downloads')
download_dir.mkdir(parents=True, exist_ok=True)
for (url, checksum) in _AUG_DOWNLOAD_URLS:
filename = download(url, path=str(download_dir), overwrite=overwrite, sha1_hash=checksum)
with zipfile.ZipFile(filename, 'r') as zip_ref:
zip_ref.extractall(path=str(path))
|
@click.command(help='Initialize ADE20K dataset.')
@click.argument('download_dir', type=str)
def main(download_dir):
dataset_dir = (Path(download_dir) / 'ade20k')
download_ade(dataset_dir, overwrite=False)
|
def download_cityscapes(path, username, password, overwrite=False):
_CITY_DOWNLOAD_URLS = [('gtFine_trainvaltest.zip', '99f532cb1af174f5fcc4c5bc8feea8c66246ddbc'), ('leftImg8bit_trainvaltest.zip', '2c0b77ce9933cc635adda307fbba5566f5d9d404')]
download_dir = (path / 'downloads')
download_dir.mkdir(parents=True, exist_ok=True)
os.system(f"wget --keep-session-cookies --save-cookies=cookies.txt --post-data 'username={username}&password={password}&submit=Login' https://www.cityscapes-dataset.com/login/ -P {download_dir}")
if (not (download_dir / 'gtFine_trainvaltest.zip').is_file()):
os.system(f'wget --load-cookies cookies.txt --content-disposition https://www.cityscapes-dataset.com/file-handling/?packageID=1 -P {download_dir}')
if (not (download_dir / 'leftImg8bit_trainvaltest.zip').is_file()):
os.system(f'wget --load-cookies cookies.txt --content-disposition https://www.cityscapes-dataset.com/file-handling/?packageID=3 -P {download_dir}')
for (filename, checksum) in _CITY_DOWNLOAD_URLS:
with zipfile.ZipFile(str((download_dir / filename)), 'r') as zip_ref:
zip_ref.extractall(path=path)
print('Extracted', filename)
|
def install_cityscapes_api():
os.system('pip install cityscapesscripts')
try:
import cityscapesscripts
except Exception:
        print('Installing Cityscapes API failed, please install it manually from https://github.com/mcordts/cityscapesScripts')
|
def convert_json_to_label(json_file):
from cityscapesscripts.preparation.json2labelImg import json2labelImg
label_file = json_file.replace('_polygons.json', '_labelTrainIds.png')
json2labelImg(json_file, label_file, 'trainIds')
|
@click.command(help='Initialize Cityscapes dataset.')
@click.argument('download_dir', type=str)
@click.option('--username', default=USERNAME, type=str)
@click.option('--password', default=PASSWORD, type=str)
@click.option('--nproc', default=10, type=int)
def main(download_dir, username, password, nproc):
dataset_dir = (Path(download_dir) / 'cityscapes')
if ((username is None) or (password is None)):
        raise ValueError('You must indicate your username and password either in the script variables or by passing options --username and --password.')
download_cityscapes(dataset_dir, username, password, overwrite=False)
install_cityscapes_api()
gt_dir = (dataset_dir / 'gtFine')
poly_files = []
for poly in mmcv.scandir(str(gt_dir), '_polygons.json', recursive=True):
poly_file = str((gt_dir / poly))
poly_files.append(poly_file)
mmcv.track_parallel_progress(convert_json_to_label, poly_files, nproc)
split_names = ['train', 'val', 'test']
for split in split_names:
filenames = []
for poly in mmcv.scandir(str((gt_dir / split)), '_polygons.json', recursive=True):
filenames.append(poly.replace('_gtFine_polygons.json', ''))
        with open(str(dataset_dir / f'{split}.txt'), 'w') as f:
            f.writelines(name + '\n' for name in filenames)
|
def download_pcontext(path, overwrite=False):
    _AUG_DOWNLOAD_URLS = [
        ('https://www.dropbox.com/s/wtdibo9lb2fur70/VOCtrainval_03-May-2010.tar?dl=1', 'VOCtrainval_03-May-2010.tar', 'bf9985e9f2b064752bf6bd654d89f017c76c395a'),
        ('https://codalabuser.blob.core.windows.net/public/trainval_merged.json', '', '169325d9f7e9047537fedca7b04de4dddf10b881'),
        ('https://hangzh.s3.amazonaws.com/encoding/data/pcontext/train.pth', '', '4bfb49e8c1cefe352df876c9b5434e655c9c1d07'),
        ('https://hangzh.s3.amazonaws.com/encoding/data/pcontext/val.pth', '', 'ebedc94247ec616c57b9a2df15091784826a7b0c'),
    ]
download_dir = (path / 'downloads')
download_dir.mkdir(parents=True, exist_ok=True)
for (url, filename, checksum) in _AUG_DOWNLOAD_URLS:
filename = download(url, path=str((download_dir / filename)), overwrite=overwrite, sha1_hash=checksum)
if (Path(filename).suffix == '.tar'):
with tarfile.open(filename) as tar:
tar.extractall(path=str(path))
else:
shutil.move(filename, str((((path / 'VOCdevkit') / 'VOC2010') / Path(filename).name)))
|
@click.command(help='Initialize PASCAL Context dataset.')
@click.argument('download_dir', type=str)
def main(download_dir):
dataset_dir = (Path(download_dir) / 'pcontext')
download_pcontext(dataset_dir, overwrite=False)
devkit_path = (dataset_dir / 'VOCdevkit')
out_dir = ((devkit_path / 'VOC2010') / 'SegmentationClassContext')
imageset_dir = (((devkit_path / 'VOC2010') / 'ImageSets') / 'SegmentationContext')
out_dir.mkdir(parents=True, exist_ok=True)
imageset_dir.mkdir(parents=True, exist_ok=True)
train_torch_path = ((devkit_path / 'VOC2010') / 'train.pth')
val_torch_path = ((devkit_path / 'VOC2010') / 'val.pth')
train_dict = torch.load(str(train_torch_path))
train_list = []
for (idx, label) in tqdm(train_dict.items()):
idx = str(idx)
new_idx = ((idx[:4] + '_') + idx[4:])
train_list.append(new_idx)
label_path = (out_dir / f'{new_idx}.png')
label.save(str(label_path))
with open(str((imageset_dir / 'train.txt')), 'w') as f:
f.writelines(((line + '\n') for line in sorted(train_list)))
val_dict = torch.load(str(val_torch_path))
val_list = []
for (idx, label) in tqdm(val_dict.items()):
idx = str(idx)
new_idx = ((idx[:4] + '_') + idx[4:])
val_list.append(new_idx)
label_path = (out_dir / f'{new_idx}.png')
label.save(str(label_path))
with open(str((imageset_dir / 'val.txt')), 'w') as f:
f.writelines(((line + '\n') for line in sorted(val_list)))
|
@click.command(help='')
@click.option('--log-dir', type=str, help='logging directory')
@click.option('--dataset', type=str)
@click.option('--im-size', default=None, type=int, help='dataset resize size')
@click.option('--crop-size', default=None, type=int)
@click.option('--window-size', default=None, type=int)
@click.option('--window-stride', default=None, type=int)
@click.option('--backbone', default='', type=str)
@click.option('--decoder', default='', type=str)
@click.option('--optimizer', default='sgd', type=str)
@click.option('--scheduler', default='polynomial', type=str)
@click.option('--weight-decay', default=0.0, type=float)
@click.option('--dropout', default=0.0, type=float)
@click.option('--drop-path', default=0.1, type=float)
@click.option('--batch-size', default=None, type=int)
@click.option('--epochs', default=None, type=int)
@click.option('-lr', '--learning-rate', default=None, type=float)
@click.option('--normalization', default=None, type=str)
@click.option('--eval-freq', default=None, type=int)
@click.option('--amp/--no-amp', default=False, is_flag=True)
@click.option('--resume/--no-resume', default=True, is_flag=True)
def main(log_dir, dataset, im_size, crop_size, window_size, window_stride, backbone, decoder, optimizer, scheduler, weight_decay, dropout, drop_path, batch_size, epochs, learning_rate, normalization, eval_freq, amp, resume):
ptu.set_gpu_mode(True)
distributed.init_process()
cfg = config.load_config()
model_cfg = cfg['model'][backbone]
dataset_cfg = cfg['dataset'][dataset]
if ('mask_transformer' in decoder):
decoder_cfg = cfg['decoder']['mask_transformer']
else:
decoder_cfg = cfg['decoder'][decoder]
if (not im_size):
im_size = dataset_cfg['im_size']
if (not crop_size):
crop_size = dataset_cfg.get('crop_size', im_size)
if (not window_size):
window_size = dataset_cfg.get('window_size', im_size)
if (not window_stride):
window_stride = dataset_cfg.get('window_stride', im_size)
model_cfg['image_size'] = (crop_size, crop_size)
model_cfg['backbone'] = backbone
model_cfg['dropout'] = dropout
model_cfg['drop_path_rate'] = drop_path
decoder_cfg['name'] = decoder
model_cfg['decoder'] = decoder_cfg
world_batch_size = dataset_cfg['batch_size']
num_epochs = dataset_cfg['epochs']
lr = dataset_cfg['learning_rate']
if batch_size:
world_batch_size = batch_size
if epochs:
num_epochs = epochs
if learning_rate:
lr = learning_rate
if (eval_freq is None):
eval_freq = dataset_cfg.get('eval_freq', 1)
if normalization:
model_cfg['normalization'] = normalization
batch_size = (world_batch_size // ptu.world_size)
    variant = dict(
        world_batch_size=world_batch_size,
        version='normal',
        resume=resume,
        dataset_kwargs=dict(dataset=dataset, image_size=im_size, crop_size=crop_size, batch_size=batch_size, normalization=model_cfg['normalization'], split='train', num_workers=10),
        algorithm_kwargs=dict(batch_size=batch_size, start_epoch=0, num_epochs=num_epochs, eval_freq=eval_freq),
        optimizer_kwargs=dict(opt=optimizer, lr=lr, weight_decay=weight_decay, momentum=0.9, clip_grad=None, sched=scheduler, epochs=num_epochs, min_lr=1e-05, poly_power=0.9, poly_step_size=1),
        net_kwargs=model_cfg,
        amp=amp,
        log_dir=log_dir,
        inference_kwargs=dict(im_size=im_size, window_size=window_size, window_stride=window_stride),
    )
log_dir = Path(log_dir)
log_dir.mkdir(parents=True, exist_ok=True)
checkpoint_path = (log_dir / 'checkpoint.pth')
dataset_kwargs = variant['dataset_kwargs']
train_loader = create_dataset(dataset_kwargs)
val_kwargs = dataset_kwargs.copy()
val_kwargs['split'] = 'val'
val_kwargs['batch_size'] = 1
val_kwargs['crop'] = False
val_loader = create_dataset(val_kwargs)
n_cls = train_loader.unwrapped.n_cls
net_kwargs = variant['net_kwargs']
net_kwargs['n_cls'] = n_cls
model = create_segmenter(net_kwargs)
model.to(ptu.device)
optimizer_kwargs = variant['optimizer_kwargs']
optimizer_kwargs['iter_max'] = (len(train_loader) * optimizer_kwargs['epochs'])
optimizer_kwargs['iter_warmup'] = 0.0
    opt_args = argparse.Namespace(**optimizer_kwargs)
optimizer = create_optimizer(opt_args, model)
lr_scheduler = create_scheduler(opt_args, optimizer)
num_iterations = 0
amp_autocast = suppress
loss_scaler = None
if amp:
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if (resume and checkpoint_path.exists()):
print(f'Resuming training from checkpoint: {checkpoint_path}')
checkpoint = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
if (loss_scaler and ('loss_scaler' in checkpoint)):
loss_scaler.load_state_dict(checkpoint['loss_scaler'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
variant['algorithm_kwargs']['start_epoch'] = (checkpoint['epoch'] + 1)
else:
sync_model(log_dir, model)
if ptu.distributed:
model = DDP(model, device_ids=[ptu.device], find_unused_parameters=True)
variant_str = yaml.dump(variant)
print(f'''Configuration:
{variant_str}''')
variant['net_kwargs'] = net_kwargs
variant['dataset_kwargs'] = dataset_kwargs
log_dir.mkdir(parents=True, exist_ok=True)
with open((log_dir / 'variant.yml'), 'w') as f:
f.write(variant_str)
start_epoch = variant['algorithm_kwargs']['start_epoch']
num_epochs = variant['algorithm_kwargs']['num_epochs']
eval_freq = variant['algorithm_kwargs']['eval_freq']
model_without_ddp = model
if hasattr(model, 'module'):
model_without_ddp = model.module
val_seg_gt = val_loader.dataset.get_gt_seg_maps()
print(f'Train dataset length: {len(train_loader.dataset)}')
print(f'Val dataset length: {len(val_loader.dataset)}')
print(f'Encoder parameters: {num_params(model_without_ddp.encoder)}')
print(f'Decoder parameters: {num_params(model_without_ddp.decoder)}')
for epoch in range(start_epoch, num_epochs):
train_logger = train_one_epoch(model, train_loader, optimizer, lr_scheduler, epoch, amp_autocast, loss_scaler)
if (ptu.dist_rank == 0):
snapshot = dict(model=model_without_ddp.state_dict(), optimizer=optimizer.state_dict(), n_cls=model_without_ddp.n_cls, lr_scheduler=lr_scheduler.state_dict())
if (loss_scaler is not None):
snapshot['loss_scaler'] = loss_scaler.state_dict()
snapshot['epoch'] = epoch
torch.save(snapshot, checkpoint_path)
eval_epoch = (((epoch % eval_freq) == 0) or (epoch == (num_epochs - 1)))
if eval_epoch:
eval_logger = evaluate(model, val_loader, val_seg_gt, window_size, window_stride, amp_autocast)
print(f'Stats [{epoch}]:', eval_logger, flush=True)
print('')
if (ptu.dist_rank == 0):
train_stats = {k: meter.global_avg for (k, meter) in train_logger.meters.items()}
val_stats = {}
if eval_epoch:
val_stats = {k: meter.global_avg for (k, meter) in eval_logger.meters.items()}
log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, **{f'val_{k}': v for (k, v) in val_stats.items()}, 'epoch': epoch, 'num_updates': ((epoch + 1) * len(train_loader))}
with open((log_dir / 'log.txt'), 'a') as f:
f.write((json.dumps(log_stats) + '\n'))
distributed.barrier()
distributed.destroy_process()
    # Exit with status 0 on successful completion.
    sys.exit(0)
|
def init_process(backend='nccl'):
print(f'Starting process with rank {ptu.dist_rank}...', flush=True)
    if 'SLURM_STEP_GPUS' in os.environ:
        gpu_ids = os.environ['SLURM_STEP_GPUS'].split(',')
os.environ['MASTER_PORT'] = str((12345 + int(min(gpu_ids))))
else:
os.environ['MASTER_PORT'] = str(12345)
if ('SLURM_JOB_NODELIST' in os.environ):
hostnames = hostlist.expand_hostlist(os.environ['SLURM_JOB_NODELIST'])
os.environ['MASTER_ADDR'] = hostnames[0]
else:
os.environ['MASTER_ADDR'] = '127.0.0.1'
dist.init_process_group(backend, rank=ptu.dist_rank, world_size=ptu.world_size)
print(f'Process {ptu.dist_rank} is connected.', flush=True)
dist.barrier()
silence_print((ptu.dist_rank == 0))
if (ptu.dist_rank == 0):
        print('All processes are connected.', flush=True)
|
def silence_print(is_master):
    """
    This function disables printing when not in master process
    """
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if (is_master or force):
builtin_print(*args, **kwargs)
__builtin__.print = print
|
def sync_model(sync_dir, model):
sync_path = (Path(sync_dir).resolve() / 'sync_model.pkl')
if ((ptu.dist_rank == 0) and (ptu.world_size > 1)):
torch.save(model.state_dict(), sync_path)
dist.barrier()
if (ptu.dist_rank > 0):
model.load_state_dict(torch.load(sync_path))
dist.barrier()
if ((ptu.dist_rank == 0) and (ptu.world_size > 1)):
sync_path.unlink()
return model
|
def barrier():
dist.barrier()
|
def destroy_process():
dist.destroy_process_group()
|
def check_sha1(filename, sha1_hash):
    """Check whether the sha1 hash of the file content matches the expected hash.

    Parameters
    ----------
    filename : str
        Path to the file.
    sha1_hash : str
        Expected sha1 hash in hexadecimal digits.

    Returns
    -------
    bool
        Whether the file content matches the expected hash.
    """
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(1048576)
if (not data):
break
sha1.update(data)
return (sha1.hexdigest() == sha1_hash)
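# Usage sketch (the path is illustrative; the hash is the ADE20K archive checksum
# listed in the download script above):
#
#     ok = check_sha1('downloads/ADEChallengeData2016.zip',
#                     '219e1696abb36c8ba3a3afe7fb2f4b4606a897c7')
#     if not ok:
#         print('checksum mismatch; re-download the archive')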
|
def download(url, path=None, overwrite=False, sha1_hash=None):
    """
    https://github.com/junfu1115/DANet/blob/master/encoding/utils/files.py
    Download a given URL

    Parameters
    ----------
    url : str
        URL to download
    path : str, optional
        Destination path to store downloaded file. By default stores to the
        current directory with same name as in url.
    overwrite : bool, optional
        Whether to overwrite destination file if already exists.
    sha1_hash : str, optional
        Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
        but doesn't match.

    Returns
    -------
    str
        The file path of the downloaded file.
    """
if (path is None):
fname = url.split('/')[(- 1)]
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split('/')[(- 1)])
else:
fname = path
if (overwrite or (not os.path.exists(fname)) or (sha1_hash and (not check_sha1(fname, sha1_hash)))):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if (not os.path.exists(dirname)):
os.makedirs(dirname)
print(('Downloading %s from %s...' % (fname, url)))
r = requests.get(url, stream=True)
if (r.status_code != 200):
raise RuntimeError(('Failed downloading url %s' % url))
total_length = r.headers.get('content-length')
with open(fname, 'wb') as f:
if (total_length is None):
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
else:
total_length = int(total_length)
for chunk in tqdm(r.iter_content(chunk_size=1024), total=int(((total_length / 1024.0) + 0.5)), unit='KB', unit_scale=False, dynamic_ncols=True):
f.write(chunk)
if (sha1_hash and (not check_sha1(fname, sha1_hash))):
raise UserWarning('File {} is downloaded but the content hash does not match. The repo may be outdated or download may be incomplete. If the "repo_url" is overridden, consider switching to the default repo.'.format(fname))
return fname
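# Usage sketch (illustrative URL; the checksum is only verified when sha1_hash is given):
#   >>> fname = download('https://example.com/checkpoints/model.pth',
#   ...                  path='./checkpoints', overwrite=False, sha1_hash=None)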
|
class SmoothedValue(object):
'Track a series of values and provide access to smoothed values over a\n window or the global series average.\n '
def __init__(self, window_size=20, fmt=None):
if (fmt is None):
fmt = '{median:.4f} ({global_avg:.4f})'
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += (value * n)
def synchronize_between_processes(self):
'\n Warning: does not synchronize the deque!\n '
if (not is_dist_avail_and_initialized()):
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device=ptu.device)
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return (self.total / self.count)
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[(- 1)]
def __str__(self):
return self.fmt.format(median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value)
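# Usage sketch: a SmoothedValue reports the windowed median next to the global average.
#   >>> meter = SmoothedValue(window_size=20)
#   >>> for loss in [0.9, 0.7, 0.5]:
#   ...     meter.update(loss)
#   >>> str(meter)
#   '0.7000 (0.7000)'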
|
class MetricLogger(object):
def __init__(self, delimiter='\t'):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, n=1, **kwargs):
for (k, v) in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v, n)
def __getattr__(self, attr):
if (attr in self.meters):
return self.meters[attr]
if (attr in self.__dict__):
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
def __str__(self):
loss_str = []
for (name, meter) in self.meters.items():
loss_str.append('{}: {}'.format(name, str(meter)))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if (not header):
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ((':' + str(len(str(len(iterable))))) + 'd')
log_msg = [header, (('[{0' + space_fmt) + '}/{1}]'), 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}']
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = (1024.0 * 1024.0)
for obj in iterable:
data_time.update((time.time() - end))
(yield obj)
iter_time.update((time.time() - end))
if (((i % print_freq) == 0) or (i == (len(iterable) - 1))):
eta_seconds = (iter_time.global_avg * (len(iterable) - i))
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=(torch.cuda.max_memory_allocated() / MB)), flush=True)
else:
print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)), flush=True)
i += 1
end = time.time()
total_time = (time.time() - start_time)
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(header, total_time_str, (total_time / len(iterable))))
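# Usage sketch inside a training loop (train_step is a hypothetical helper):
#   >>> logger = MetricLogger(delimiter='  ')
#   >>> for batch in logger.log_every(train_loader, print_freq=10, header='Epoch: [0]'):
#   ...     loss = train_step(batch)
#   ...     logger.update(loss=loss)
#   >>> logger.synchronize_between_processes()  # reduce counts/totals across ranks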
|
def is_dist_avail_and_initialized():
if (not dist.is_available()):
return False
if (not dist.is_initialized()):
return False
return True
|
def set_gpu_mode(mode):
global use_gpu
global device
global gpu_id
global distributed
global dist_rank
global world_size
gpu_id = int(os.environ.get('SLURM_LOCALID', 0))
dist_rank = int(os.environ.get('SLURM_PROCID', 0))
world_size = int(os.environ.get('SLURM_NTASKS', 1))
distributed = (world_size > 1)
use_gpu = mode
device = torch.device((f'cuda:{gpu_id}' if use_gpu else 'cpu'))
torch.backends.cudnn.benchmark = True
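# Usage sketch: set_gpu_mode() fills the module-level globals (device, dist_rank,
# world_size, distributed) from the SLURM environment, so it must run before
# init_process() opens the process group.
#   >>> set_gpu_mode(torch.cuda.is_available())
#   >>> if distributed:
#   ...     init_process(backend='nccl')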
|
def read_requirements_file(filename):
req_file_path = path.join(path.dirname(path.realpath(__file__)), filename)
    with open(req_file_path) as f:
        # skip blank lines and comments, which pip also ignores in requirements files
        return [line.strip() for line in f if (line.strip() and (not line.lstrip().startswith('#')))]
|
def build_optimizer(model, length_train_loader, config):
    # transformers.AdamW is deprecated in recent versions of the library;
    # fall back to the drop-in torch.optim.AdamW when it is missing.
    optimizer_class = getattr(transformers, 'AdamW', torch.optim.AdamW)
    optimizer = optimizer_class(model.model.parameters(), lr=float(config['lr']))
num_training_steps = (config['train_epochs'] * length_train_loader)
lr_scheduler = get_scheduler(name='linear', optimizer=optimizer, num_warmup_steps=config['warmup_iterations'], num_training_steps=num_training_steps)
return (optimizer, lr_scheduler)
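# Usage sketch (assumes a config dict with the keys read above and an existing train_loader):
#   >>> config = {'lr': 2e-4, 'train_epochs': 10, 'warmup_iterations': 1000}
#   >>> optimizer, lr_scheduler = build_optimizer(model, len(train_loader), config)
#   >>> # per training step: optimizer.step(); lr_scheduler.step(); optimizer.zero_grad()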
|
def build_model(config):
available_models = ['bertqa', 'longformer', 'bigbird', 'layoutlmv2', 'layoutlmv3', 't5', 'vt5', 'hi-vt5']
if ((config['model_name'].lower() == 'bert') or (config['model_name'].lower() == 'bertqa')):
from models.BertQA import BertQA
model = BertQA(config)
elif (config['model_name'].lower() == 'longformer'):
from models.Longformer import Longformer
model = Longformer(config)
elif (config['model_name'].lower() == 'bigbird'):
from models.BigBird import BigBird
model = BigBird(config)
elif (config['model_name'].lower() == 'layoutlmv2'):
from models.LayoutLMv2 import LayoutLMv2
model = LayoutLMv2(config)
elif (config['model_name'].lower() == 'layoutlmv3'):
from models.LayoutLMv3 import LayoutLMv3
model = LayoutLMv3(config)
elif (config['model_name'].lower() == 't5'):
from models.T5 import T5
model = T5(config)
elif (config['model_name'].lower() == 'vt5'):
from models.VT5 import ProxyVT5 as VT5
model = VT5(config)
elif (config['model_name'].lower() in ['hivt5', 'hi-vt5']):
from models.HiVT5 import Proxy_HiVT5 as HiVT5
model = HiVT5(config)
else:
raise ValueError("Value '{:s}' for model selection not expected. Please choose one of {:}".format(config['model_name'], ', '.join(available_models)))
if ((config['device'] == 'cuda') and config['data_parallel'] and (torch.cuda.device_count() > 1)):
model.parallelize()
model.model.to(config['device'])
return model
|
def build_dataset(config, split):
dataset_kwargs = {}
if (config['model_name'].lower() in ['layoutlmv2', 'layoutlmv3', 'lt5', 'vt5', 'hilt5', 'hi-lt5', 'hivt5', 'hi-vt5']):
dataset_kwargs['get_raw_ocr_data'] = True
if (config['model_name'].lower() in ['layoutlmv2', 'layoutlmv3', 'vt5', 'hivt5', 'hi-vt5']):
dataset_kwargs['use_images'] = True
if (config['model_name'].lower() in ['hilt5', 'hi-lt5', 'hivt5', 'hi-vt5']):
dataset_kwargs['max_pages'] = config.get('max_pages', 1)
dataset_kwargs['hierarchical_method'] = True
if (config['dataset_name'] == 'SP-DocVQA'):
from datasets.SP_DocVQA import SPDocVQA
dataset = SPDocVQA(config['imdb_dir'], config['images_dir'], split, dataset_kwargs)
elif (config['dataset_name'] == 'MP-DocVQA'):
from datasets.MP_DocVQA import MPDocVQA
dataset = MPDocVQA(config['imdb_dir'], config['images_dir'], config['page_retrieval'], split, dataset_kwargs)
elif (config['dataset_name'] == 'DUDE'):
from datasets.DUDE import DUDE
dataset = DUDE(config['imdb_dir'], config['images_dir'], config['page_retrieval'], split, dataset_kwargs)
else:
        raise ValueError("Dataset '{:}' not supported. Choose one of: SP-DocVQA, MP-DocVQA, DUDE.".format(config['dataset_name']))
return dataset
|
def save_model(model, epoch, update_best=False, **kwargs):
save_dir = os.path.join(kwargs['save_dir'], 'checkpoints', '{:s}_{:s}_{:s}'.format(kwargs['model_name'].lower(), kwargs.get('page_retrieval', '').lower(), kwargs['dataset_name'].lower()))
model.model.save_pretrained(os.path.join(save_dir, 'model__{:d}.ckpt'.format(epoch)))
tokenizer = (model.tokenizer if hasattr(model, 'tokenizer') else (model.processor if hasattr(model, 'processor') else None))
if (tokenizer is not None):
tokenizer.save_pretrained(os.path.join(save_dir, 'model__{:d}.ckpt'.format(epoch)))
if hasattr(model.model, 'visual_embeddings'):
model.model.visual_embeddings.feature_extractor.save_pretrained(os.path.join(save_dir, 'model__{:d}.ckpt'.format(epoch)))
save_yaml(os.path.join(save_dir, 'model__{:d}.ckpt'.format(epoch), 'experiment_config.yml'), kwargs)
    if update_best:
        model.model.save_pretrained(os.path.join(save_dir, 'best.ckpt'))
        # guard against models without a tokenizer/processor attribute
        if (tokenizer is not None):
            tokenizer.save_pretrained(os.path.join(save_dir, 'best.ckpt'))
        save_yaml(os.path.join(save_dir, 'best.ckpt', 'experiment_config.yml'), kwargs)
|
def load_model(base_model, ckpt_name, **kwargs):
load_dir = kwargs['save_dir']
base_model.model.from_pretrained(os.path.join(load_dir, ckpt_name))
|
class DUDE(MPDocVQA):
    def __init__(self, imdb_dir, images_dir, page_retrieval, split, kwargs):
        super(DUDE, self).__init__(imdb_dir, images_dir, page_retrieval, split, kwargs)
if (self.page_retrieval == 'oracle'):
raise ValueError("'Oracle' set-up is not valid for DUDE, since there is no GT for the answer page.")
def __getitem__(self, idx):
record = self.imdb[idx]
question = record['question']
answers = list(set((answer.lower() for answer in record['answers'])))
answer_page_idx = None
num_pages = record['num_pages']
if (self.page_retrieval == 'oracle'):
raise ValueError("'Oracle' set-up is not valid for DUDE, since there is no GT for the answer page.")
elif (self.page_retrieval == 'concat'):
context = ''
context_page_corresp = []
for page_ix in range(record['num_pages']):
page_context = ' '.join([word.lower() for word in record['ocr_tokens'][page_ix]])
context += (' ' + page_context)
context_page_corresp.extend(([(- 1)] + ([page_ix] * len(page_context))))
context = context.strip()
context_page_corresp = context_page_corresp[1:]
if self.get_raw_ocr_data:
(words, boxes) = ([], [])
for p in range(num_pages):
if (len(record['ocr_tokens'][p]) == 0):
continue
words.extend([word.lower() for word in record['ocr_tokens'][p]])
"\n mod_boxes = np.array(record['ocr_normalized_boxes'][p])\n mod_boxes[:, 1] = mod_boxes[:, 1]/num_pages + p/num_pages\n mod_boxes[:, 3] = mod_boxes[:, 3]/num_pages + p/num_pages\n\n boxes.extend(mod_boxes) # bbox in l,t,r,b\n "
boxes = record['ocr_normalized_boxes']
if self.use_images:
image_names = [os.path.join(self.images_dir, '{:s}'.format(image_name)) for image_name in record['image_name']]
images = [Image.open(img_path).convert('RGB') for img_path in image_names]
images += [Image.new('RGB', (2, 2)) for i in range((self.max_pages - len(image_names)))]
(images, boxes) = utils.create_grid_image(images, boxes)
else:
boxes = np.array(boxes)
elif (self.page_retrieval == 'logits'):
context = []
for page_ix in range(record['num_pages']):
context.append(' '.join([word.lower() for word in record['ocr_tokens'][page_ix]]))
context_page_corresp = None
if self.use_images:
image_names = [os.path.join(self.images_dir, '{:s}'.format(image_name)) for image_name in record['image_name']]
images = [Image.open(img_path).convert('RGB') for img_path in image_names]
if self.get_raw_ocr_data:
words = []
boxes = [np.array(page_boxes) for page_boxes in record['ocr_normalized_boxes']]
for p in range(num_pages):
words.append([word.lower() for word in record['ocr_tokens'][p]])
elif (self.page_retrieval == 'custom'):
(first_page, last_page) = self.get_pages(record)
            # DUDE has no GT answer page, so answer_page_idx may be None here
            if (answer_page_idx is not None):
                answer_page_idx = (answer_page_idx - first_page)
num_pages = len(range(first_page, last_page))
words = []
boxes = []
context = []
image_names = []
for page_ix in range(first_page, last_page):
words.append([word.lower() for word in record['ocr_tokens'][page_ix]])
boxes.append(np.array(record['ocr_normalized_boxes'][page_ix], dtype=np.float32))
context.append(' '.join([word.lower() for word in record['ocr_tokens'][page_ix]]))
image_names.append(os.path.join(self.images_dir, '{:s}'.format(record['image_name'][page_ix])))
context_page_corresp = None
if (num_pages < self.max_pages):
for _ in range((self.max_pages - num_pages)):
words.append([''])
boxes.append(np.zeros([1, 4], dtype=np.float32))
if self.use_images:
images = [Image.open(img_path).convert('RGB') for img_path in image_names]
                # pad with tiny dummy pages; a zero-size image breaks PIL and processor ops downstream
                images += [Image.new('RGB', (2, 2)) for i in range((self.max_pages - len(image_names)))]
if ((self.page_retrieval == 'oracle') or (self.page_retrieval == 'concat')):
(start_idxs, end_idxs) = self._get_start_end_idx(context, answers)
elif (self.page_retrieval == 'logits'):
(start_idxs, end_idxs) = ([], [])
for page_ix in range(record['num_pages']):
(s, e) = self._get_start_end_idx(context[page_ix], answers)
start_idxs.append(s)
end_idxs.append(e)
sample_info = {'question_id': record['question_id'], 'questions': question, 'contexts': context, 'context_page_corresp': context_page_corresp, 'answers': answers, 'answer_page_idx': answer_page_idx, 'answer_type': record['extra']['answer_type']}
if self.use_images:
sample_info['image_names'] = image_names
sample_info['images'] = images
if self.get_raw_ocr_data:
sample_info['words'] = words
sample_info['boxes'] = boxes
sample_info['num_pages'] = num_pages
else:
sample_info['start_indxs'] = start_idxs
sample_info['end_indxs'] = end_idxs
if self.get_doc_id:
sample_info['doc_id'] = [record['image_name'][page_ix] for page_ix in range(first_page, last_page)]
return sample_info
|
class MPDocVQA(Dataset):
    def __init__(self, imdb_dir, images_dir, page_retrieval, split, kwargs):
        data = np.load(os.path.join(imdb_dir, 'imdb_{:s}.npy'.format(split)), allow_pickle=True)
self.header = data[0]
self.imdb = data[1:]
self.page_retrieval = page_retrieval.lower()
assert (self.page_retrieval in ['oracle', 'concat', 'logits', 'custom'])
self.max_answers = 2
self.images_dir = images_dir
self.use_images = kwargs.get('use_images', False)
self.get_raw_ocr_data = kwargs.get('get_raw_ocr_data', False)
self.max_pages = kwargs.get('max_pages', 1)
self.get_doc_id = False
def __len__(self):
return len(self.imdb)
def sample(self, idx=None, question_id=None):
if (idx is not None):
return self.__getitem__(idx)
if (question_id is not None):
for idx in range(self.__len__()):
record = self.imdb[idx]
if (record['question_id'] == question_id):
return self.__getitem__(idx)
raise ValueError('Question ID {:d} not in dataset.'.format(question_id))
        # randrange excludes the upper bound; randint(0, len) could index one past the end
        idx = random.randrange(self.__len__())
return self.__getitem__(idx)
def __getitem__(self, idx):
record = self.imdb[idx]
question = record['question']
answers = list(set((answer.lower() for answer in record['answers'])))
answer_page_idx = record['answer_page_idx']
num_pages = record['imdb_doc_pages']
if (self.page_retrieval == 'oracle'):
context = ' '.join([word.lower() for word in record['ocr_tokens'][answer_page_idx]])
context_page_corresp = None
num_pages = 1
if self.use_images:
image_names = os.path.join(self.images_dir, '{:s}.jpg'.format(record['image_name'][answer_page_idx]))
images = Image.open(image_names).convert('RGB')
if self.get_raw_ocr_data:
words = [word.lower() for word in record['ocr_tokens'][answer_page_idx]]
boxes = np.array([bbox for bbox in record['ocr_normalized_boxes'][answer_page_idx]])
elif (self.page_retrieval == 'concat'):
context = ''
context_page_corresp = []
for page_ix in range(record['imdb_doc_pages']):
page_context = ' '.join([word.lower() for word in record['ocr_tokens'][page_ix]])
context += (' ' + page_context)
context_page_corresp.extend(([(- 1)] + ([page_ix] * len(page_context))))
context = context.strip()
context_page_corresp = context_page_corresp[1:]
if self.get_raw_ocr_data:
words = []
for p in range(num_pages):
words.extend([word.lower() for word in record['ocr_tokens'][p]])
"\n mod_boxes = record['ocr_normalized_boxes'][p]\n mod_boxes[:, 1] = mod_boxes[:, 1]/num_pages + p/num_pages\n mod_boxes[:, 3] = mod_boxes[:, 3]/num_pages + p/num_pages\n\n boxes.extend(mod_boxes) # bbox in l,t,r,b\n "
boxes = record['ocr_normalized_boxes']
else:
(words, boxes) = (None, None)
if self.use_images:
image_names = [os.path.join(self.images_dir, '{:s}.jpg'.format(image_name)) for image_name in record['image_name']]
images = [Image.open(img_path).convert('RGB') for img_path in image_names]
(images, boxes) = utils.create_grid_image(images, boxes)
else:
boxes = np.array(boxes)
elif (self.page_retrieval == 'logits'):
context = []
for page_ix in range(record['imdb_doc_pages']):
context.append(' '.join([word.lower() for word in record['ocr_tokens'][page_ix]]))
context_page_corresp = None
if self.use_images:
image_names = [os.path.join(self.images_dir, '{:s}.jpg'.format(image_name)) for image_name in record['image_name']]
images = [Image.open(img_path).convert('RGB') for img_path in image_names]
if self.get_raw_ocr_data:
words = []
boxes = record['ocr_normalized_boxes']
for p in range(num_pages):
words.append([word.lower() for word in record['ocr_tokens'][p]])
elif (self.page_retrieval == 'custom'):
(first_page, last_page) = self.get_pages(record)
answer_page_idx = (answer_page_idx - first_page)
num_pages = len(range(first_page, last_page))
words = []
boxes = []
context = []
image_names = []
for page_ix in range(first_page, last_page):
words.append([word.lower() for word in record['ocr_tokens'][page_ix]])
boxes.append(np.array(record['ocr_normalized_boxes'][page_ix], dtype=np.float32))
context.append(' '.join([word.lower() for word in record['ocr_tokens'][page_ix]]))
image_names.append(os.path.join(self.images_dir, '{:s}.jpg'.format(record['image_name'][page_ix])))
context_page_corresp = None
if (num_pages < self.max_pages):
for _ in range((self.max_pages - num_pages)):
words.append([''])
boxes.append(np.zeros([1, 4], dtype=np.float32))
if self.use_images:
images = [Image.open(img_path).convert('RGB') for img_path in image_names]
images += [Image.new('RGB', (2, 2)) for i in range((self.max_pages - len(image_names)))]
if (self.page_retrieval in ['oracle', 'concat', 'none']):
(start_idxs, end_idxs) = self._get_start_end_idx(context, answers)
elif (self.page_retrieval == 'logits'):
(start_idxs, end_idxs) = self._get_start_end_idx(context[answer_page_idx], answers)
else:
(start_idxs, end_idxs) = (None, None)
sample_info = {'question_id': record['question_id'], 'questions': question, 'contexts': context, 'context_page_corresp': context_page_corresp, 'answers': answers, 'answer_page_idx': answer_page_idx, 'num_pages': num_pages}
if self.use_images:
sample_info['image_names'] = image_names
sample_info['images'] = images
if self.get_raw_ocr_data:
sample_info['words'] = words
sample_info['boxes'] = boxes
else:
sample_info['start_indxs'] = start_idxs
sample_info['end_indxs'] = end_idxs
if self.get_doc_id:
sample_info['doc_id'] = [record['image_name'][page_ix] for page_ix in range(first_page, last_page)]
return sample_info
def _get_start_end_idx(self, context, answers):
answer_positions = []
for answer in answers:
start_idx = context.find(answer)
if (start_idx != (- 1)):
end_idx = (start_idx + len(answer))
answer_positions.append([start_idx, end_idx])
if (len(answer_positions) > 0):
(start_idx, end_idx) = random.choice(answer_positions)
else:
(start_idx, end_idx) = (0, 0)
return (start_idx, end_idx)
def get_pages(self, sample_info):
answer_page = sample_info['answer_page_idx']
document_pages = sample_info['imdb_doc_pages']
if (document_pages <= self.max_pages):
(first_page, last_page) = (0, document_pages)
else:
first_page_lower_bound = max(0, ((answer_page - self.max_pages) + 1))
first_page_upper_bound = answer_page
first_page = random.randint(first_page_lower_bound, first_page_upper_bound)
last_page = (first_page + self.max_pages)
if (last_page > document_pages):
last_page = document_pages
first_page = (last_page - self.max_pages)
            assert (answer_page in range(first_page, last_page))
            assert ((last_page - first_page) == self.max_pages)
assert (first_page >= 0)
assert (last_page <= document_pages)
return (first_page, last_page)
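# Worked example: with max_pages=4, a 10-page document and answer_page_idx=7,
# first_page is drawn uniformly from [4, 7]; drawing first_page=6 gives the
# window [6, 10), which contains the answer page and spans exactly max_pages
# pages. Documents with at most max_pages pages are returned whole.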
|
def mpdocvqa_collate_fn(batch):
batch = {k: [dic[k] for dic in batch] for k in batch[0]}
return batch
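# Usage sketch: the collate fn keeps ragged per-sample fields (answers, boxes,
# page lists) as plain Python lists instead of stacking them into tensors.
#   >>> from torch.utils.data import DataLoader
#   >>> loader = DataLoader(dataset, batch_size=4, collate_fn=mpdocvqa_collate_fn)
#   >>> batch = next(iter(loader))
#   >>> len(batch['questions'])
#   4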
|
class SPDocVQA(Dataset):
    def __init__(self, imdb_dir, images_dir, split, kwargs):
        data = np.load(os.path.join(imdb_dir, 'new_imdb_{:s}.npy'.format(split)), allow_pickle=True)
self.header = data[0]
self.imdb = data[1:]
self.hierarchical_method = kwargs.get('hierarchical_method', False)
self.max_answers = 2
self.images_dir = images_dir
self.use_images = kwargs.get('use_images', False)
self.get_raw_ocr_data = kwargs.get('get_raw_ocr_data', False)
def __len__(self):
return len(self.imdb)
def __getitem__(self, idx):
record = self.imdb[idx]
question = record['question']
context = ' '.join([word.lower() for word in record['ocr_tokens']])
context_page_corresp = [0 for ix in range(len(context))]
answers = list(set((answer.lower() for answer in record['answers'])))
if self.use_images:
image_name = os.path.join(self.images_dir, '{:s}.png'.format(record['image_name']))
image = Image.open(image_name).convert('RGB')
if self.get_raw_ocr_data:
words = [word.lower() for word in record['ocr_tokens']]
boxes = np.array([bbox for bbox in record['ocr_normalized_boxes']])
if self.hierarchical_method:
words = [words]
boxes = [boxes]
image_name = [image_name]
image = [image]
(start_idxs, end_idxs) = self._get_start_end_idx(context, answers)
sample_info = {'question_id': record['question_id'], 'questions': question, 'contexts': context, 'answers': answers, 'start_indxs': start_idxs, 'end_indxs': end_idxs}
if self.use_images:
sample_info['image_names'] = image_name
sample_info['images'] = image
if self.get_raw_ocr_data:
sample_info['words'] = words
sample_info['boxes'] = boxes
sample_info['num_pages'] = 1
sample_info['answer_page_idx'] = 0
else:
sample_info['context_page_corresp'] = context_page_corresp
sample_info['start_indxs'] = start_idxs
sample_info['end_indxs'] = end_idxs
return sample_info
def _get_start_end_idx(self, context, answers):
answer_positions = []
for answer in answers:
start_idx = context.find(answer)
if (start_idx != (- 1)):
end_idx = (start_idx + len(answer))
answer_positions.append([start_idx, end_idx])
if (len(answer_positions) > 0):
(start_idx, end_idx) = random.choice(answer_positions)
else:
(start_idx, end_idx) = (0, 0)
return (start_idx, end_idx)
|
def singlepage_docvqa_collate_fn(batch):
batch = {k: [dic[k] for dic in batch] for k in batch[0]}
return batch
|
def evaluate(data_loader, model, evaluator, **kwargs):
return_scores_by_sample = kwargs.get('return_scores_by_sample', False)
return_answers = kwargs.get('return_answers', False)
if return_scores_by_sample:
scores_by_samples = {}
total_accuracies = []
total_anls = []
total_ret_prec = []
else:
total_accuracies = 0
total_anls = 0
total_ret_prec = 0
all_pred_answers = []
model.model.eval()
for (batch_idx, batch) in enumerate(tqdm(data_loader)):
bs = len(batch['question_id'])
with torch.no_grad():
(outputs, pred_answers, pred_answer_page, answer_conf) = model.forward(batch, return_pred_answer=True)
metric = evaluator.get_metrics(batch['answers'], pred_answers, batch.get('answer_type', None))
if (('answer_page_idx' in batch) and (pred_answer_page is not None)):
ret_metric = evaluator.get_retrieval_metric(batch['answer_page_idx'], pred_answer_page)
else:
ret_metric = [0 for _ in range(bs)]
if return_scores_by_sample:
for batch_idx in range(bs):
scores_by_samples[batch['question_id'][batch_idx]] = {'accuracy': metric['accuracy'][batch_idx], 'anls': metric['anls'][batch_idx], 'ret_prec': ret_metric[batch_idx], 'pred_answer': pred_answers[batch_idx], 'pred_answer_conf': answer_conf[batch_idx], 'pred_answer_page': (pred_answer_page[batch_idx] if (pred_answer_page is not None) else None)}
if return_scores_by_sample:
total_accuracies.extend(metric['accuracy'])
total_anls.extend(metric['anls'])
total_ret_prec.extend(ret_metric)
else:
total_accuracies += sum(metric['accuracy'])
total_anls += sum(metric['anls'])
total_ret_prec += sum(ret_metric)
if return_answers:
all_pred_answers.extend(pred_answers)
if (not return_scores_by_sample):
total_accuracies = (total_accuracies / len(data_loader.dataset))
total_anls = (total_anls / len(data_loader.dataset))
total_ret_prec = (total_ret_prec / len(data_loader.dataset))
scores_by_samples = []
return (total_accuracies, total_anls, total_ret_prec, all_pred_answers, scores_by_samples)
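# Usage sketch (assumes a built model, a validation loader and an Evaluator):
#   >>> evaluator = Evaluator(case_sensitive=False)
#   >>> accuracy, anls, ret_prec, answers, per_sample = evaluate(
#   ...     val_loader, model, evaluator,
#   ...     return_scores_by_sample=False, return_answers=True)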
|
class Logger():
def __init__(self, config):
self.log_folder = config['save_dir']
experiment_date = datetime.datetime.now().strftime('%Y.%m.%d_%H.%M.%S')
self.experiment_name = '{:s}__{:}'.format(config['model_name'], experiment_date)
machine_dict = {'cvc117': 'Local', 'cudahpc16': 'DAG', 'cudahpc25': 'DAG-A40'}
machine = machine_dict.get(socket.gethostname(), socket.gethostname())
dataset = config['dataset_name']
page_retrieval = config.get('page_retrieval', '-').capitalize()
visual_encoder = config.get('visual_module', {}).get('model', '-').upper()
document_pages = config.get('max_pages', None)
page_tokens = config.get('page_tokens', None)
tags = [config['model_name'], dataset, machine]
config = {'Model': config['model_name'], 'Weights': config['model_weights'], 'Dataset': dataset, 'Page retrieval': page_retrieval, 'Visual Encoder': visual_encoder, 'Batch size': config['batch_size'], 'Max. Seq. Length': config.get('max_sequence_length', '-'), 'lr': config['lr'], 'seed': config['seed']}
if document_pages:
config['Max Pages'] = document_pages
if page_tokens:
config['PAGE tokens'] = page_tokens
self.logger = wb.init(project='MP-DocVQA', name=self.experiment_name, dir=self.log_folder, tags=tags, config=config)
self._print_config(config)
self.current_epoch = 0
self.len_dataset = 0
def _print_config(self, config):
print('{:s}: {:s} \n{{'.format(config['Model'], config['Weights']))
for (k, v) in config.items():
if ((k != 'Model') and (k != 'Weights')):
print('\t{:}: {:}'.format(k, v))
print('}\n')
def log_model_parameters(self, model):
total_params = sum((p.numel() for p in model.model.parameters()))
trainable_params = sum((p.numel() for p in model.model.parameters() if p.requires_grad))
self.logger.config.update({'Model Params': int((total_params / 1000000.0)), 'Model Trainable Params': int((trainable_params / 1000000.0))})
print('Model parameters: {:d} - Trainable: {:d} ({:2.2f}%)'.format(total_params, trainable_params, ((trainable_params / total_params) * 100)))
def log_val_metrics(self, accuracy, anls, ret_prec, update_best=False):
str_msg = 'Epoch {:d}: Accuracy {:2.2f} ANLS {:2.4f} Retrieval precision: {:2.2f}%'.format(self.current_epoch, (accuracy * 100), anls, (ret_prec * 100))
self.logger.log({'Val/Epoch Accuracy': accuracy, 'Val/Epoch ANLS': anls, 'Val/Epoch Ret. Prec': ret_prec}, step=((self.current_epoch * self.len_dataset) + self.len_dataset))
if update_best:
str_msg += '\tBest Accuracy!'
self.logger.config.update({'Best Accuracy': accuracy, 'Best epoch': self.current_epoch}, allow_val_change=True)
print(str_msg)
|
class Evaluator():
def __init__(self, case_sensitive=False):
self.case_sensitive = case_sensitive
self.get_edit_distance = editdistance.eval
self.anls_threshold = 0.5
self.total_accuracies = []
self.total_anls = []
self.best_accuracy = 0
self.best_epoch = 0
def get_metrics(self, gt_answers, preds, answer_types=None, update_global_metrics=True):
answer_types = (answer_types if (answer_types is not None) else ['string' for batch_idx in range(len(gt_answers))])
batch_accuracy = []
batch_anls = []
for batch_idx in range(len(preds)):
gt = [self._preprocess_str(gt_elm) for gt_elm in gt_answers[batch_idx]]
pred = self._preprocess_str(preds[batch_idx])
batch_accuracy.append(self._calculate_accuracy(gt, pred, answer_types[batch_idx]))
batch_anls.append(self._calculate_anls(gt, pred, answer_types[batch_idx]))
return {'accuracy': batch_accuracy, 'anls': batch_anls}
def get_retrieval_metric(self, gt_answer_page, pred_answer_page):
retrieval_precision = [(1 if (gt == pred) else 0) for (gt, pred) in zip(gt_answer_page, pred_answer_page)]
return retrieval_precision
def update_global_metrics(self, accuracy, anls, current_epoch):
if (accuracy > self.best_accuracy):
self.best_accuracy = accuracy
self.best_epoch = current_epoch
return True
else:
return False
def _preprocess_str(self, string):
if (not self.case_sensitive):
string = string.lower()
return string.strip()
def _calculate_accuracy(self, gt, pred, answer_type):
if (answer_type == 'not-answerable'):
            # include the lowercased 'na': _preprocess_str lowercases preds, so 'NA' alone never matches
            return (1 if (pred in ['', 'none', 'na', 'NA', None, []]) else 0)
if ((pred == 'none') and (answer_type != 'not-answerable')):
return 0
for gt_elm in gt:
if (gt_elm == pred):
return 1
return 0
def _calculate_anls(self, gt, pred, answer_type):
if (len(pred) == 0):
return 0
if (answer_type == 'not-answerable'):
            return (1 if (pred in ['', 'none', 'na', 'NA', None, []]) else 0)
if ((pred == 'none') and (answer_type != 'not-answerable')):
return 0
answers_similarity = [(1 - (self.get_edit_distance(gt_elm, pred) / max(len(gt_elm), len(pred)))) for gt_elm in gt]
max_similarity = max(answers_similarity)
anls = (max_similarity if (max_similarity >= self.anls_threshold) else 0)
return anls
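# Worked ANLS example: for gt=['due-date'] and pred='due date' the edit distance
# is 1 over a max length of 8, so the similarity is 1 - 1/8 = 0.875; since
# 0.875 >= anls_threshold (0.5), ANLS = 0.875. Similarities below the threshold
# score 0, penalising answers that are more than "half wrong".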
|
class BertQA():
def __init__(self, config):
self.batch_size = config['batch_size']
self.model = AutoModelForQuestionAnswering.from_pretrained(config['model_weights'])
self.tokenizer = AutoTokenizer.from_pretrained(config['model_weights'])
self.page_retrieval = (config['page_retrieval'].lower() if ('page_retrieval' in config) else None)
self.max_sequence_length = config.get('max_sequence_length', 512)
self.ignore_index = 9999
def parallelize(self):
self.model = nn.DataParallel(self.model)
def prepare_inputs_for_vqa(self, question, context, context_page_corresp, answers=None):
encoding = self.tokenizer(question, context, padding=True, truncation=True, max_length=self.max_sequence_length, return_tensors='pt')
input_ids = encoding['input_ids'].to(self.model.device)
attention_mask = encoding['attention_mask'].to(self.model.device)
context_encoding = self.tokenizer.batch_encode_plus(context, padding=True, truncation=True, max_length=self.max_sequence_length)
if (answers is not None):
(start_pos, end_pos, context_page_token_correspondent) = model_utils.get_start_end_idx('BertQA', encoding, context, context_encoding, answers, context_page_corresp, self.page_retrieval, self.tokenizer.sep_token_id, self.tokenizer.pad_token_id, self.ignore_index, self.model.device)
else:
(start_pos, end_pos, context_page_token_correspondent) = (None, None, None)
return (input_ids, attention_mask, context_encoding, start_pos, end_pos, context_page_token_correspondent)
def forward(self, batch, return_pred_answer=False):
question = batch['questions']
context = batch['contexts']
answers = batch['answers']
if (self.page_retrieval == 'logits'):
outputs = []
pred_answers = []
pred_answer_pages = []
answ_confidence = []
for batch_idx in range(len(context)):
document_encoding = self.tokenizer(([question[batch_idx]] * len(context[batch_idx])), context[batch_idx], padding=True, truncation=True, max_length=self.max_sequence_length, return_tensors='pt')
max_logits = (- 999999)
answer_page = None
document_outputs = None
for page_idx in range(len(document_encoding['input_ids'])):
input_ids = document_encoding['input_ids'][page_idx].to(self.model.device)
attention_mask = document_encoding['attention_mask'][page_idx].to(self.model.device)
page_outputs = self.model(input_ids.unsqueeze(dim=0), attention_mask=attention_mask.unsqueeze(dim=0))
(pred_answer, answer_conf) = self.get_answer_from_model_output(input_ids.unsqueeze(dim=0), page_outputs)
if (answer_conf[0] > max_logits):
answer_page = page_idx
document_outputs = page_outputs
max_logits = answer_conf[0]
outputs.append(None)
                # extend(None) raises a TypeError when return_pred_answer is False
                if return_pred_answer:
                    pred_answers.extend(self.get_answer_from_model_output([document_encoding['input_ids'][answer_page]], document_outputs)[0])
                else:
                    pred_answers.append(None)
pred_answer_pages.append(answer_page)
answ_confidence.append(max_logits)
else:
(input_ids, attention_mask, context_encoding, start_pos, end_pos, context_page_token_correspondent) = self.prepare_inputs_for_vqa(question, context, batch['context_page_corresp'], answers)
outputs = self.model(input_ids, attention_mask=attention_mask, start_positions=start_pos, end_positions=end_pos)
            (pred_answers, answ_confidence) = (self.get_answer_from_model_output(input_ids, outputs) if return_pred_answer else (None, None))
if (self.page_retrieval == 'oracle'):
pred_answer_pages = batch['answer_page_idx']
elif (self.page_retrieval == 'concat'):
pred_answer_pages = [(context_page_token_correspondent[batch_idx][pred_start_idx].item() if (len(context_page_token_correspondent[batch_idx]) > pred_start_idx) else (- 1)) for (batch_idx, pred_start_idx) in enumerate(outputs.start_logits.argmax((- 1)).tolist())]
            else:
                # covers 'none' and an unset page_retrieval; avoids returning an undefined name
                pred_answer_pages = None
return (outputs, pred_answers, pred_answer_pages, answ_confidence)
def get_answer_from_model_output(self, input_tokens, outputs):
start_idxs = torch.argmax(outputs.start_logits, axis=1)
end_idxs = torch.argmax(outputs.end_logits, axis=1)
answers = []
for batch_idx in range(len(input_tokens)):
context_tokens = self.tokenizer.convert_ids_to_tokens(input_tokens[batch_idx].tolist())
answer_tokens = context_tokens[start_idxs[batch_idx]:(end_idxs[batch_idx] + 1)]
answer = self.tokenizer.decode(self.tokenizer.convert_tokens_to_ids(answer_tokens))
answer = answer.strip()
answers.append(answer)
answ_confidence = model_utils.get_extractive_confidence(outputs)
return (answers, answ_confidence)
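# Note on the 'logits' page-retrieval strategy shared by the extractive models
# (BertQA, BigBird, Longformer, LayoutLMv2/v3): every page is run independently
# and the page whose predicted span has the highest confidence is kept as both
# the answer source and the predicted answer page. A minimal sketch of that
# selection step over per-page scores:
#   >>> confidences = [0.2, 0.9, 0.4]
#   >>> max(range(len(confidences)), key=confidences.__getitem__)
#   1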
|
class BigBird():
def __init__(self, config):
self.batch_size = config['batch_size']
self.tokenizer = BigBirdTokenizerFast.from_pretrained(config['model_weights'])
self.model = BigBirdForQuestionAnswering.from_pretrained(config['model_weights'])
self.page_retrieval = (config['page_retrieval'].lower() if ('page_retrieval' in config) else None)
self.ignore_index = 9999
def parallelize(self):
self.model = nn.DataParallel(self.model)
def forward(self, batch, return_pred_answer=False):
question = batch['questions']
context = batch['contexts']
answers = batch['answers']
if (self.page_retrieval == 'logits'):
outputs = []
pred_answers = []
pred_answer_pages = []
answ_confidence = []
for batch_idx in range(len(context)):
document_encoding = self.tokenizer(([question[batch_idx]] * len(context[batch_idx])), context[batch_idx], return_tensors='pt', padding=True, truncation=True)
max_logits = (- 999999)
answer_page = None
document_outputs = None
for page_idx in range(len(document_encoding['input_ids'])):
input_ids = document_encoding['input_ids'][page_idx].to(self.model.device)
attention_mask = document_encoding['attention_mask'][page_idx].to(self.model.device)
page_outputs = self.model(input_ids.unsqueeze(dim=0), attention_mask=attention_mask.unsqueeze(dim=0))
(pred_answer, answer_conf) = self.get_answer_from_model_output(input_ids.unsqueeze(dim=0), page_outputs)
if (answer_conf[0] > max_logits):
answer_page = page_idx
document_outputs = page_outputs
max_logits = answer_conf[0]
outputs.append(None)
                if return_pred_answer:
                    pred_answers.extend(self.get_answer_from_model_output([document_encoding['input_ids'][answer_page]], document_outputs)[0])
                else:
                    pred_answers.append(None)
pred_answer_pages.append(answer_page)
answ_confidence.append(max_logits)
else:
encoding = self.tokenizer(question, context, return_tensors='pt', padding=True, truncation=True)
input_ids = encoding['input_ids'].to(self.model.device)
attention_mask = encoding['attention_mask'].to(self.model.device)
context_encoding = self.tokenizer.batch_encode_plus(context, padding=True, truncation=True)
(start_pos, end_pos, context_page_token_correspondent) = model_utils.get_start_end_idx('BigBird', encoding, context, context_encoding, answers, batch['context_page_corresp'], self.page_retrieval, self.tokenizer.sep_token_id, self.tokenizer.pad_token_id, self.ignore_index, self.model.device)
outputs = self.model(input_ids, attention_mask=attention_mask, start_positions=start_pos, end_positions=end_pos)
            (pred_answers, answ_confidence) = (self.get_answer_from_model_output(input_ids, outputs) if return_pred_answer else (None, None))
if (self.page_retrieval == 'oracle'):
pred_answer_pages = batch['answer_page_idx']
elif (self.page_retrieval == 'concat'):
pred_answer_pages = [(context_page_token_correspondent[batch_idx][pred_start_idx] if (len(context_page_token_correspondent[batch_idx]) > pred_start_idx) else (- 1)) for (batch_idx, pred_start_idx) in enumerate(outputs.start_logits.argmax((- 1)).tolist())]
            else:
                pred_answer_pages = None
return (outputs, pred_answers, pred_answer_pages, answ_confidence)
def get_answer_from_model_output(self, input_tokens, outputs):
start_idxs = torch.argmax(outputs.start_logits, axis=1)
end_idxs = torch.argmax(outputs.end_logits, axis=1)
answers = []
for batch_idx in range(len(input_tokens)):
context_tokens = self.tokenizer.convert_ids_to_tokens(input_tokens[batch_idx].tolist())
answer_tokens = context_tokens[start_idxs[batch_idx]:(end_idxs[batch_idx] + 1)]
answer = self.tokenizer.decode(self.tokenizer.convert_tokens_to_ids(answer_tokens))
answer = answer.strip()
answers.append(answer)
answ_confidence = model_utils.get_extractive_confidence(outputs)
return (answers, answ_confidence)
|
class LayoutLMv2():
def __init__(self, config):
self.batch_size = config['batch_size']
self.processor = LayoutLMv2Processor.from_pretrained(config['model_weights'], apply_ocr=False)
self.model = LayoutLMv2ForQuestionAnswering.from_pretrained(config['model_weights'])
self.page_retrieval = (config['page_retrieval'].lower() if ('page_retrieval' in config) else None)
self.ignore_index = 9999
def parallelize(self):
self.model = nn.DataParallel(self.model)
def forward(self, batch, return_pred_answer=False):
question = batch['questions']
context = batch['contexts']
answers = batch['answers']
bs = len(question)
if (self.page_retrieval == 'logits'):
outputs = []
pred_answers = []
pred_answer_pages = []
for batch_idx in range(bs):
images = [Image.open(img_path).convert('RGB') for img_path in batch['image_names'][batch_idx]]
boxes = [(bbox * 1000).astype(int) for bbox in batch['boxes'][batch_idx]]
document_encoding = self.processor(images, ([question[batch_idx]] * len(images)), batch['words'][batch_idx], boxes=boxes, return_tensors='pt', padding=True, truncation=True).to(self.model.device)
max_logits = (- 999999)
answer_page = None
document_outputs = None
for page_idx in range(len(document_encoding['input_ids'])):
page_inputs = {k: v[page_idx].unsqueeze(dim=0) for (k, v) in document_encoding.items()}
page_outputs = self.model(**page_inputs)
start_logits_cnf = [page_outputs.start_logits[(batch_ix, max_start_logits_idx.item())].item() for (batch_ix, max_start_logits_idx) in enumerate(page_outputs.start_logits.argmax((- 1)))][0]
end_logits_cnf = [page_outputs.end_logits[(batch_ix, max_end_logits_idx.item())].item() for (batch_ix, max_end_logits_idx) in enumerate(page_outputs.end_logits.argmax((- 1)))][0]
page_logits = np.mean([start_logits_cnf, end_logits_cnf])
if (page_logits > max_logits):
answer_page = page_idx
document_outputs = page_outputs
max_logits = page_logits
outputs.append(None)
pred_answers.append((self.get_answer_from_model_output([document_encoding['input_ids'][answer_page]], document_outputs)[0] if return_pred_answer else None))
pred_answer_pages.append(answer_page)
else:
if (self.page_retrieval in ['oracle', None]):
images = [Image.open(img_path).convert('RGB') for img_path in batch['image_names']]
elif (self.page_retrieval == 'concat'):
images = []
for batch_idx in range(bs):
images.append(self.get_concat_v_multi_resize([Image.open(img_path).convert('RGB') for img_path in batch['image_names'][batch_idx]]))
elif (self.page_retrieval == 'none'):
images = [Image.open(img_path).convert('RGB') for img_path in batch['image_names']]
boxes = [(bbox * 1000).astype(int) for bbox in batch['boxes']]
encoding = self.processor(images, question, batch['words'], boxes=boxes, return_tensors='pt', padding=True, truncation=True).to(self.model.device)
(start_pos, end_pos) = self.get_start_end_idx(encoding, context, answers)
outputs = self.model(**encoding, start_positions=start_pos, end_positions=end_pos)
pred_answers = (self.get_answer_from_model_output(encoding.input_ids, outputs) if return_pred_answer else None)
if (self.page_retrieval == 'oracle'):
pred_answer_pages = batch['answer_page_idx']
elif (self.page_retrieval == 'concat'):
pred_answer_pages = [(batch['context_page_corresp'][batch_idx][pred_start_idx] if (len(batch['context_page_corresp'][batch_idx]) > pred_start_idx) else (- 1)) for (batch_idx, pred_start_idx) in enumerate(outputs.start_logits.argmax((- 1)).tolist())]
            else:
                pred_answer_pages = None
if (random.randint(0, 1000) == 0):
for (question_id, gt_answer, pred_answer) in zip(batch['question_id'], answers, pred_answers):
print('ID: {:5d} GT: {:} - Pred: {:s}'.format(question_id, gt_answer, pred_answer))
return (outputs, pred_answers, pred_answer_pages)
def get_concat_v_multi_resize(self, im_list, resample=Image.BICUBIC):
min_width = min((im.width for im in im_list))
im_list_resize = [im.resize((min_width, int(((im.height * min_width) / im.width))), resample=resample) for im in im_list]
        # all resized images now share min_width and keep their aspect ratio
total_height = sum((im.height for im in im_list_resize))
dst = Image.new('RGB', (min_width, total_height))
pos_y = 0
for im in im_list_resize:
dst.paste(im, (0, pos_y))
pos_y += im.height
return dst
def get_start_end_idx(self, encoding, context, answers):
pos_idx = []
for batch_idx in range(len(encoding.input_ids)):
answer_pos = []
for answer in answers[batch_idx]:
                # the LayoutLM tokenizer expects one box per word, hence the nested list
                encoded_answer = [token for token in self.processor.tokenizer.encode([answer], boxes=[[0, 0, 0, 0]]) if (token not in self.processor.tokenizer.all_special_ids)]
answer_tokens_length = len(encoded_answer)
for token_pos in range(len(encoding.input_ids[batch_idx])):
if (encoding.input_ids[batch_idx][token_pos:(token_pos + answer_tokens_length)].tolist() == encoded_answer):
answer_pos.append([token_pos, ((token_pos + answer_tokens_length) - 1)])
if (len(answer_pos) == 0):
pos_idx.append([self.ignore_index, self.ignore_index])
else:
answer_pos = random.choice(answer_pos)
pos_idx.append(answer_pos)
start_idxs = torch.LongTensor([idx[0] for idx in pos_idx]).to(self.model.device)
end_idxs = torch.LongTensor([idx[1] for idx in pos_idx]).to(self.model.device)
return (start_idxs, end_idxs)
def get_answer_from_model_output(self, input_tokens, outputs):
start_idxs = torch.argmax(outputs.start_logits, axis=1)
end_idxs = torch.argmax(outputs.end_logits, axis=1)
answers = [self.processor.tokenizer.decode(input_tokens[batch_idx][start_idxs[batch_idx]:(end_idxs[batch_idx] + 1)]).strip() for batch_idx in range(len(input_tokens))]
return answers
|
class LayoutLMv3():
def __init__(self, config):
self.batch_size = config['batch_size']
self.processor = LayoutLMv3Processor.from_pretrained(config['model_weights'], apply_ocr=False)
self.model = LayoutLMv3ForQuestionAnswering.from_pretrained(config['model_weights'])
self.page_retrieval = (config['page_retrieval'].lower() if ('page_retrieval' in config) else None)
self.ignore_index = 9999
def parallelize(self):
self.model = nn.DataParallel(self.model)
def forward(self, batch, return_pred_answer=False):
bs = len(batch['question_id'])
question = batch['questions']
context = batch['contexts']
answers = batch['answers']
images = batch['images']
if (self.page_retrieval == 'logits'):
outputs = []
pred_answers = []
pred_answer_pages = []
answ_confidence = []
for batch_idx in range(bs):
images = [Image.open(img_path).convert('RGB') for img_path in batch['image_names'][batch_idx]]
boxes = [(bbox * 1000).astype(int) for bbox in batch['boxes'][batch_idx]]
document_encoding = self.processor(images, ([question[batch_idx]] * len(images)), batch['words'][batch_idx], boxes=boxes, return_tensors='pt', padding=True, truncation=True).to(self.model.device)
max_logits = (- 999999)
answer_page = None
document_outputs = None
for page_idx in range(len(document_encoding['input_ids'])):
page_inputs = {k: v[page_idx].unsqueeze(dim=0) for (k, v) in document_encoding.items()}
page_outputs = self.model(**page_inputs)
(pred_answer, answer_conf) = self.get_answer_from_model_output(page_inputs['input_ids'].unsqueeze(dim=0), page_outputs)
if (answer_conf[0] > max_logits):
answer_page = page_idx
document_outputs = page_outputs
max_logits = answer_conf[0]
outputs.append(None)
                if return_pred_answer:
                    pred_answers.extend(self.get_answer_from_model_output([document_encoding['input_ids'][answer_page]], document_outputs)[0])
                else:
                    pred_answers.append(None)
pred_answer_pages.append(answer_page)
answ_confidence.append(max_logits)
else:
boxes = [(bbox * 1000).astype(int) for bbox in batch['boxes']]
encoding = self.processor(images, question, batch['words'], boxes=boxes, return_tensors='pt', padding=True, truncation=True).to(self.model.device)
(start_pos, end_pos) = self.get_start_end_idx(encoding, context, answers)
outputs = self.model(**encoding, start_positions=start_pos, end_positions=end_pos)
            (pred_answers, answ_confidence) = (self.get_answer_from_model_output(encoding.input_ids, outputs) if return_pred_answer else (None, None))
if (self.page_retrieval == 'oracle'):
pred_answer_pages = batch['answer_page_idx']
elif (self.page_retrieval == 'concat'):
pred_answer_pages = [(batch['context_page_corresp'][batch_idx][pred_start_idx] if (len(batch['context_page_corresp'][batch_idx]) > pred_start_idx) else (- 1)) for (batch_idx, pred_start_idx) in enumerate(outputs.start_logits.argmax((- 1)).tolist())]
            else:
                pred_answer_pages = None
return (outputs, pred_answers, pred_answer_pages, answ_confidence)
def get_concat_v_multi_resize(self, im_list, resample=Image.BICUBIC):
min_width = min((im.width for im in im_list))
im_list_resize = [im.resize((min_width, int(((im.height * min_width) / im.width))), resample=resample) for im in im_list]
        # all resized images now share min_width and keep their aspect ratio
total_height = sum((im.height for im in im_list_resize))
dst = Image.new('RGB', (min_width, total_height))
pos_y = 0
for im in im_list_resize:
dst.paste(im, (0, pos_y))
pos_y += im.height
return dst
def get_start_end_idx(self, encoding, context, answers):
pos_idx = []
for batch_idx in range(len(encoding.input_ids)):
answer_pos = []
for answer in answers[batch_idx]:
                encoded_answer = [token for token in self.processor.tokenizer.encode([answer], boxes=[[0, 0, 0, 0]]) if (token not in self.processor.tokenizer.all_special_ids)]
answer_tokens_length = len(encoded_answer)
for token_pos in range(len(encoding.input_ids[batch_idx])):
if (encoding.input_ids[batch_idx][token_pos:(token_pos + answer_tokens_length)].tolist() == encoded_answer):
answer_pos.append([token_pos, ((token_pos + answer_tokens_length) - 1)])
if (len(answer_pos) == 0):
pos_idx.append([self.ignore_index, self.ignore_index])
else:
answer_pos = random.choice(answer_pos)
pos_idx.append(answer_pos)
start_idxs = torch.LongTensor([idx[0] for idx in pos_idx]).to(self.model.device)
end_idxs = torch.LongTensor([idx[1] for idx in pos_idx]).to(self.model.device)
return (start_idxs, end_idxs)
def get_answer_from_model_output(self, input_tokens, outputs):
start_idxs = torch.argmax(outputs.start_logits, axis=1)
end_idxs = torch.argmax(outputs.end_logits, axis=1)
answers = [self.processor.tokenizer.decode(input_tokens[batch_idx][start_idxs[batch_idx]:(end_idxs[batch_idx] + 1)], skip_special_tokens=True).strip() for batch_idx in range(len(input_tokens))]
start_logits = outputs.start_logits.softmax(dim=1).detach().cpu()
end_logits = outputs.end_logits.softmax(dim=1).detach().cpu()
answ_confidence = []
for batch_idx in range(len(input_tokens)):
conf_mat = np.matmul(np.expand_dims(start_logits[batch_idx].unsqueeze(dim=0), (- 1)), np.expand_dims(end_logits[batch_idx].unsqueeze(dim=0), 1)).squeeze(axis=0)
answ_confidence.append(conf_mat[(start_idxs[batch_idx], end_idxs[batch_idx])].item())
return (answers, answ_confidence)
|
class LongT5():
def __init__(self, config):
self.batch_size = config['batch_size']
self.tokenizer = AutoTokenizer.from_pretrained(config['model_weights'])
self.model = LongT5ForConditionalGeneration.from_pretrained(config['model_weights'])
self.page_retrieval = (config['page_retrieval'].lower() if ('page_retrieval' in config) else None)
self.max_sequence_length = config.get('max_sequence_length', 4096)
self.ignore_index = 9999
def parallelize(self):
self.model = nn.DataParallel(self.model)
def prepare_inputs_for_vqa(self, question, context, answers=None):
input_text = ['question: {:s} context: {:s}'.format(q, c) for (q, c) in zip(question, context)]
tokens = self.tokenizer(input_text, padding=True, truncation=True, max_length=self.max_sequence_length, return_tensors='pt').to(self.model.device)
if (answers is not None):
answers = [random.choice(answer) for answer in answers]
labels = self.tokenizer(answers, return_tensors='pt', padding=True)
labels.input_ids[(labels.input_ids[:] == self.tokenizer.pad_token_id)] = (- 100)
labels = labels.input_ids.to(self.model.device)
else:
labels = None
return (tokens.input_ids, tokens.attention_mask, labels)
def forward(self, batch, return_pred_answer=False):
question = batch['questions']
context = batch['contexts']
answers = batch['answers']
if (self.page_retrieval == 'logits'):
num_pages = batch['num_pages']
outputs = []
pred_answers = []
pred_answer_pages = []
pred_answers_conf = []
for batch_idx in range(len(context)):
(input_ids, attention_mask, _) = self.prepare_inputs_for_vqa(([question[batch_idx]] * num_pages[batch_idx]), context[batch_idx])
                (pred_answer, logits) = (self.get_answer_from_model_output(input_ids, attention_mask) if return_pred_answer else (None, None))
max_logits = (- 999999)
answer_page = None
best_answer = None
for p_ix in range(len(input_ids)):
if (logits[p_ix] > max_logits):
max_logits = logits[p_ix]
answer_page = p_ix
best_answer = pred_answer[p_ix]
outputs.append(None)
pred_answers.append(best_answer)
pred_answer_pages.append(answer_page)
pred_answers_conf.append(max_logits)
else:
(input_ids, attention_mask, labels) = self.prepare_inputs_for_vqa(question, context, answers)
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
            (pred_answers, logits) = (self.get_answer_from_model_output(input_ids, attention_mask) if return_pred_answer else (None, None))
if (self.page_retrieval == 'oracle'):
pred_answer_pages = batch['answer_page_idx']
else:
pred_answer_pages = None
        # in the 'logits' branch the per-page confidences are accumulated in pred_answers_conf,
        # not in the loop-local `logits` variable
        return (outputs, pred_answers, pred_answer_pages, (pred_answers_conf if (self.page_retrieval == 'logits') else logits))
def get_answer_from_model_output(self, input_tokens, attention_mask):
output = self.model.generate(input_tokens, attention_mask=attention_mask, output_scores=True, return_dict_in_generate=True, output_attentions=True)
pred_answers = self.tokenizer.batch_decode(output['sequences'], skip_special_tokens=True)
pred_answers_conf = model_utils.get_generative_confidence(output)
return (pred_answers, pred_answers_conf)
|
class Longformer():
def __init__(self, config):
self.batch_size = config['batch_size']
self.tokenizer = LongformerTokenizerFast.from_pretrained(config['model_weights'])
self.model = LongformerForQuestionAnswering.from_pretrained(config['model_weights'])
self.page_retrieval = (config['page_retrieval'].lower() if ('page_retrieval' in config) else None)
self.ignore_index = 9999
def parallelize(self):
self.model = nn.DataParallel(self.model)
def forward(self, batch, return_pred_answer=False):
question = batch['questions']
context = batch['contexts']
answers = batch['answers']
if (self.page_retrieval == 'logits'):
outputs = []
pred_answers = []
pred_answer_pages = []
answ_confidence = []
for batch_idx in range(len(context)):
document_encoding = self.tokenizer(([question[batch_idx]] * len(context[batch_idx])), context[batch_idx], return_tensors='pt', padding=True, truncation=True)
max_logits = (- 999999)
answer_page = None
document_outputs = None
for page_idx in range(len(document_encoding['input_ids'])):
input_ids = document_encoding['input_ids'][page_idx].to(self.model.device)
attention_mask = document_encoding['attention_mask'][page_idx].to(self.model.device)
page_outputs = self.model(input_ids.unsqueeze(dim=0), attention_mask=attention_mask.unsqueeze(dim=0))
(pred_answer, answer_conf) = self.get_answer_from_model_output(input_ids.unsqueeze(dim=0), page_outputs)
if (answer_conf[0] > max_logits):
answer_page = page_idx
document_outputs = page_outputs
max_logits = answer_conf[0]
outputs.append(None)
                if return_pred_answer:
                    pred_answers.extend(self.get_answer_from_model_output([document_encoding['input_ids'][answer_page]], document_outputs)[0])
                else:
                    pred_answers.append(None)
pred_answer_pages.append(answer_page)
answ_confidence.append(max_logits)
else:
encoding = self.tokenizer(question, context, return_tensors='pt', padding=True, truncation=True)
input_ids = encoding['input_ids'].to(self.model.device)
attention_mask = encoding['attention_mask'].to(self.model.device)
context_encoding = self.tokenizer.batch_encode_plus(context, padding=True, truncation=True)
(start_pos, end_pos, context_page_token_correspondent) = model_utils.get_start_end_idx('Longformer', encoding, context, context_encoding, answers, batch['context_page_corresp'], self.page_retrieval, self.tokenizer.sep_token_id, self.tokenizer.pad_token_id, self.ignore_index, self.model.device)
outputs = self.model(input_ids, attention_mask=attention_mask, start_positions=start_pos, end_positions=end_pos)
            (pred_answers, answ_confidence) = (self.get_answer_from_model_output(input_ids, outputs) if return_pred_answer else (None, None))
if (self.page_retrieval == 'oracle'):
pred_answer_pages = batch['answer_page_idx']
elif (self.page_retrieval == 'concat'):
pred_answer_pages = [(context_page_token_correspondent[batch_idx][pred_start_idx] if (len(context_page_token_correspondent[batch_idx]) > pred_start_idx) else (- 1)) for (batch_idx, pred_start_idx) in enumerate(outputs.start_logits.argmax((- 1)).tolist())]
elif (self.page_retrieval == 'none'):
pred_answer_pages = None
# if random.randint(0, 1000) == 0:
#     print(batch['question_id'])
#     for gt_answer, pred_answer in zip(answers, pred_answers):
#         print(gt_answer, pred_answer)
#     for start_p, end_p, pred_start_p, pred_end_p in zip(start_pos, end_pos, outputs.start_logits.argmax(-1), outputs.end_logits.argmax(-1)):
#         print("GT: {:d}-{:d} \t Pred: {:d}-{:d}".format(start_p.item(), end_p.item(), pred_start_p, pred_end_p))
return (outputs, pred_answers, pred_answer_pages, answ_confidence)
def get_answer_from_model_output(self, input_tokens, outputs):
start_idxs = torch.argmax(outputs.start_logits, axis=1)
end_idxs = torch.argmax(outputs.end_logits, axis=1)
answers = []
for batch_idx in range(len(input_tokens)):
context_tokens = self.tokenizer.convert_ids_to_tokens(input_tokens[batch_idx].tolist())
answer_tokens = context_tokens[start_idxs[batch_idx]:(end_idxs[batch_idx] + 1)]
answer = self.tokenizer.decode(self.tokenizer.convert_tokens_to_ids(answer_tokens))
answer = answer.strip()
answers.append(answer)
answ_confidence = model_utils.get_extractive_confidence(outputs)
return (answers, answ_confidence)
|
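# Generative QA wrapper around T5ForConditionalGeneration. Inputs are formatted as
# "question: {q} context: {c}"; padding tokens in the labels are replaced with -100
# so they are ignored by the cross-entropy loss.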
class T5():
def __init__(self, config):
self.batch_size = config['batch_size']
self.tokenizer = T5Tokenizer.from_pretrained(config['model_weights'])
self.model = T5ForConditionalGeneration.from_pretrained(config['model_weights'])
self.page_retrieval = (config['page_retrieval'].lower() if ('page_retrieval' in config) else None)
def parallelize(self):
self.model = nn.DataParallel(self.model)
def prepare_inputs_for_vqa(self, question, context, answers=None):
input_text = ['question: {:s} context: {:s}'.format(q, c) for (q, c) in zip(question, context)]
tokens = self.tokenizer(input_text, return_tensors='pt', padding=True, truncation=True).to(self.model.device)
if (answers is not None):
answers = [random.choice(answer) for answer in answers]
labels = self.tokenizer(answers, return_tensors='pt', padding=True)
labels.input_ids[(labels.input_ids[:] == self.tokenizer.pad_token_id)] = (- 100)
labels = labels.input_ids.to(self.model.device)
else:
labels = None
return (tokens.input_ids, tokens.attention_mask, labels)
def forward(self, batch, return_pred_answer=False):
question = batch['questions']
context = batch['contexts']
answers = batch['answers']
if (self.page_retrieval == 'logits'):
num_pages = batch['num_pages']
outputs = []
pred_answers = []
pred_answer_pages = []
pred_answers_conf = []
for batch_idx in range(len(context)):
(input_ids, attention_mask, _) = self.prepare_inputs_for_vqa(([question[batch_idx]] * num_pages[batch_idx]), context[batch_idx])
(pred_answer, logits) = (self.get_answer_from_model_output(input_ids, attention_mask) if return_pred_answer else (None, None))
max_logits = (- 999999)
answer_page = None
best_answer = None
for p_ix in range(len(input_ids)):
if (logits[p_ix] > max_logits):
max_logits = logits[p_ix]
answer_page = p_ix
best_answer = pred_answer[p_ix]
outputs.append(None)
pred_answers.append(best_answer)
pred_answer_pages.append(answer_page)
pred_answers_conf.append(max_logits)
else:
(input_ids, attention_mask, labels) = self.prepare_inputs_for_vqa(question, context, answers)
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
(pred_answers, pred_answers_conf) = (self.get_answer_from_model_output(input_ids, attention_mask) if return_pred_answer else (None, None))
if (self.page_retrieval == 'oracle'):
pred_answer_pages = batch['answer_page_idx']
else:
pred_answer_pages = None
return (outputs, pred_answers, pred_answer_pages, pred_answers_conf)
def get_answer_from_model_output(self, input_ids, attention_mask):
output = self.model.generate(input_ids, attention_mask=attention_mask, output_scores=True, return_dict_in_generate=True, output_attentions=True)
pred_answers = self.tokenizer.batch_decode(output['sequences'], skip_special_tokens=True)
pred_answers_conf = model_utils.get_generative_confidence(output)
return (pred_answers, pred_answers_conf)
|
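# One training epoch: forward pass, backpropagation, optimizer/scheduler step, and
# per-batch logging of loss, accuracy, ANLS and (when available) retrieval precision.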
def train_epoch(data_loader, model, optimizer, lr_scheduler, evaluator, logger, **kwargs):
model.model.train()
for (batch_idx, batch) in enumerate(tqdm(data_loader)):
gt_answers = batch['answers']
(outputs, pred_answers, pred_answer_page, answer_conf) = model.forward(batch, return_pred_answer=True)
loss = ((outputs.loss + outputs.ret_loss) if hasattr(outputs, 'ret_loss') else outputs.loss)
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
metric = evaluator.get_metrics(gt_answers, pred_answers)
batch_acc = np.mean(metric['accuracy'])
batch_anls = np.mean(metric['anls'])
log_dict = {'Train/Batch loss': outputs.loss.item(), 'Train/Batch Accuracy': batch_acc, 'Train/Batch ANLS': batch_anls, 'lr': optimizer.param_groups[0]['lr']}
if hasattr(outputs, 'ret_loss'):
log_dict['Train/Batch retrieval loss'] = outputs.ret_loss.item()
if (('answer_page_idx' in batch) and (None not in batch['answer_page_idx'])):
ret_metric = evaluator.get_retrieval_metric(batch.get('answer_page_idx', None), pred_answer_page)
batch_ret_prec = np.mean(ret_metric)
log_dict['Train/Batch Ret. Prec.'] = batch_ret_prec
logger.logger.log(log_dict, step=((logger.current_epoch * logger.len_dataset) + batch_idx))
|
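# Full training loop: builds datasets and loaders, optionally evaluates before
# training ('eval_start'), then alternates train_epoch and validation, saving a
# checkpoint whenever the global metrics improve.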
def train(model, **kwargs):
epochs = kwargs['train_epochs']
batch_size = kwargs['batch_size']
seed_everything(kwargs['seed'])
evaluator = Evaluator(case_sensitive=False)
logger = Logger(config=kwargs)
logger.log_model_parameters(model)
train_dataset = build_dataset(kwargs, 'train')
val_dataset = build_dataset(kwargs, 'val')
train_data_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=singlepage_docvqa_collate_fn)
val_data_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, collate_fn=singlepage_docvqa_collate_fn)
logger.len_dataset = len(train_data_loader)
(optimizer, lr_scheduler) = build_optimizer(model, length_train_loader=len(train_data_loader), config=kwargs)
if kwargs.get('eval_start', False):
logger.current_epoch = (- 1)
(accuracy, anls, ret_prec, _, _) = evaluate(val_data_loader, model, evaluator, return_scores_by_sample=False, return_pred_answers=False, **kwargs)
is_updated = evaluator.update_global_metrics(accuracy, anls, (- 1))
logger.log_val_metrics(accuracy, anls, ret_prec, update_best=is_updated)
for epoch_ix in range(epochs):
logger.current_epoch = epoch_ix
train_epoch(train_data_loader, model, optimizer, lr_scheduler, evaluator, logger, **kwargs)
(accuracy, anls, ret_prec, _, _) = evaluate(val_data_loader, model, evaluator, return_scores_by_sample=False, return_pred_answers=False, **kwargs)
is_updated = evaluator.update_global_metrics(accuracy, anls, epoch_ix)
logger.log_val_metrics(accuracy, anls, ret_prec, update_best=is_updated)
save_model(model, epoch_ix, update_best=is_updated, **kwargs)
|
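# Differentiable bouncing-ball ODE (torchdiffeq event-handling example). The event
# function fires when the ball's position reaches its radius; state_update then
# reflects the velocity, damped by the absorption factor, and nudges the position
# by 1e-7 so the event does not immediately re-trigger.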
class BouncingBallExample(nn.Module):
def __init__(self, radius=0.2, gravity=9.8, adjoint=False):
super().__init__()
self.gravity = nn.Parameter(torch.as_tensor([gravity]))
self.log_radius = nn.Parameter(torch.log(torch.as_tensor([radius])))
self.t0 = nn.Parameter(torch.tensor([0.0]))
self.init_pos = nn.Parameter(torch.tensor([10.0]))
self.init_vel = nn.Parameter(torch.tensor([0.0]))
self.absorption = nn.Parameter(torch.tensor([0.2]))
self.odeint = (odeint_adjoint if adjoint else odeint)
def forward(self, t, state):
(pos, vel, log_radius) = state
dpos = vel
dvel = (- self.gravity)
return (dpos, dvel, torch.zeros_like(log_radius))
def event_fn(self, t, state):
(pos, _, log_radius) = state
return (pos - torch.exp(log_radius))
def get_initial_state(self):
state = (self.init_pos, self.init_vel, self.log_radius)
return (self.t0, state)
def state_update(self, state):
'Updates state based on an event (collision).'
(pos, vel, log_radius) = state
pos = (pos + 1e-07)
vel = ((- vel) * (1 - self.absorption))
return (pos, vel, log_radius)
def get_collision_times(self, nbounces=1):
event_times = []
(t0, state) = self.get_initial_state()
for i in range(nbounces):
(event_t, solution) = odeint_event(self, state, t0, event_fn=self.event_fn, reverse_time=False, atol=1e-08, rtol=1e-08, odeint_interface=self.odeint)
event_times.append(event_t)
state = self.state_update(tuple((s[(- 1)] for s in solution)))
t0 = event_t
return event_times
def simulate(self, nbounces=1):
event_times = self.get_collision_times(nbounces)
(t0, state) = self.get_initial_state()
trajectory = [state[0][None]]
velocity = [state[1][None]]
times = [t0.reshape((- 1))]
for event_t in event_times:
tt = torch.linspace(float(t0), float(event_t), int(((float(event_t) - float(t0)) * 50)))[1:(- 1)]
tt = torch.cat([t0.reshape((- 1)), tt, event_t.reshape((- 1))])
solution = odeint(self, state, tt, atol=1e-08, rtol=1e-08)
trajectory.append(solution[0][1:])
velocity.append(solution[1][1:])
times.append(tt[1:])
state = self.state_update(tuple((s[(- 1)] for s in solution)))
t0 = event_t
return (torch.cat(times), torch.cat(trajectory, dim=0).reshape((- 1)), torch.cat(velocity, dim=0).reshape((- 1)), event_times)
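# Hedged usage sketch (names as defined above):
# system = BouncingBallExample(radius=0.2, gravity=9.8)
# times, pos, vel, event_times = system.simulate(nbounces=3)
# print([t.item() for t in event_times])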
|
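# Checks analytical gradients of the final collision time against central finite
# differences: fd = (f(p + eps) - f(p - eps)) / (2 * eps).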
def gradcheck(nbounces):
system = BouncingBallExample()
variables = {'init_pos': system.init_pos, 'init_vel': system.init_vel, 't0': system.t0, 'gravity': system.gravity, 'log_radius': system.log_radius}
event_t = system.get_collision_times(nbounces)[(- 1)]
event_t.backward()
analytical_grads = {}
for (name, p) in system.named_parameters():
for var in variables.keys():
if (var in name):
analytical_grads[var] = p.grad
eps = 0.001
fd_grads = {}
for (var, param) in variables.items():
orig = param.data
param.data = (orig - eps)
f_meps = system.get_collision_times(nbounces)[(- 1)]
param.data = (orig + eps)
f_peps = system.get_collision_times(nbounces)[(- 1)]
param.data = orig
fd = ((f_peps - f_meps) / (2 * eps))
fd_grads[var] = fd
success = True
for var in variables.keys():
analytical = analytical_grads[var]
fd = fd_grads[var]
if (torch.norm((analytical - fd)) > 0.0001):
success = False
print(f'Got analytical grad {analytical.item()} for {var} param but finite difference is {fd.item()}')
if (not success):
raise Exception('Gradient check failed.')
print('Gradient check passed.')
|
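# Thin wrapper that counts the number of function evaluations (NFE) made by the solver.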
class NFEDiffEq():
def __init__(self, diffeq):
self.diffeq = diffeq
self.nfe = 0
def __call__(self, t, y):
self.nfe += 1
return self.diffeq(t, y)
|
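# Sweeps solver/tolerance combinations over the DETEST problem suite (classes A-E,
# problems 1-5), comparing each run against a high-accuracy dopri5 reference solution
# computed once per problem at atol=rtol=1e-12.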
def main():
sol = dict()
for method in ['dopri5', 'adams']:
for tol in [0.001, 1e-06, 1e-09]:
print('======= {} | tol={:e} ======='.format(method, tol))
nfes = []
times = []
errs = []
for c in ['A', 'B', 'C', 'D', 'E']:
for i in ['1', '2', '3', '4', '5']:
(diffeq, init, _) = getattr(detest, (c + i))()
(t0, y0) = init()
diffeq = NFEDiffEq(diffeq)
if (not ((c + i) in sol)):
sol[(c + i)] = odeint(diffeq, y0, torch.stack([t0, torch.tensor(20.0)]), atol=1e-12, rtol=1e-12, method='dopri5')[1]
diffeq.nfe = 0
start_time = time.time()
est = odeint(diffeq, y0, torch.stack([t0, torch.tensor(20.0)]), atol=tol, rtol=tol, method=method)
time_spent = (time.time() - start_time)
error = torch.sqrt(torch.mean(((sol[(c + i)] - est[1]) ** 2)))
errs.append(error.item())
nfes.append(diffeq.nfe)
times.append(time_spent)
print('{}: NFE {} | Time {} | Err {:e}'.format((c + i), diffeq.nfe, time_spent, error.item()))
print('Total NFE {} | Total Time {} | GeomAvg Error {:e}'.format(np.sum(nfes), np.sum(times), gmean(errs)))
|
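# Verifies that odeint handles tuple-valued states: each component of the tuple
# should match the reference solution of the underlying scalar problem, and the
# solve should remain differentiable.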
class TestCollectionState(unittest.TestCase):
def test_forward(self):
for dtype in DTYPES:
eps = EPS[dtype]
for device in DEVICES:
(f, y0, t_points, sol) = construct_problem(dtype=dtype, device=device)
tuple_f = (lambda t, y: (f(t, y[0]), f(t, y[1])))
tuple_y0 = (y0, y0)
for method in ADAPTIVE_METHODS:
with self.subTest(dtype=dtype, device=device, method=method):
tuple_y = torchdiffeq.odeint(tuple_f, tuple_y0, t_points, method=method)
max_error0 = (sol - tuple_y[0]).abs().max()
max_error1 = (sol - tuple_y[1]).abs().max()
self.assertLess(max_error0, eps)
self.assertLess(max_error1, eps)
def test_gradient(self):
for device in DEVICES:
(f, y0, t_points, sol) = construct_problem(device=device)
tuple_f = (lambda t, y: (f(t, y[0]), f(t, y[1])))
for method in ADAPTIVE_METHODS:
if (method == 'scipy_solver'):
continue
with self.subTest(device=device, method=method):
for i in range(2):
func = (lambda y0, t_points: torchdiffeq.odeint(tuple_f, (y0, y0), t_points, method=method)[i])
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
|
def rel_error(true, estimate):
return ((true - estimate) / true).abs().max()
|
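# Verifies event-terminated integration: the solve should stop at the time and
# state where event_fn crosses zero, for both forward and reverse time, with
# looser tolerances for the fixed-step methods.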
class TestEventHandling(unittest.TestCase):
def test_odeint(self):
for reverse in (False, True):
for dtype in DTYPES:
for device in DEVICES:
for method in METHODS:
if (method == 'scipy_solver'):
continue
for ode in ('constant', 'sine'):
with self.subTest(reverse=reverse, dtype=dtype, device=device, ode=ode, method=method):
if (method == 'explicit_adams'):
tol = 0.07
elif (method == 'euler'):
tol = 0.005
else:
tol = 0.0001
(f, y0, t_points, sol) = construct_problem(dtype=dtype, device=device, ode=ode, reverse=reverse)
def event_fn(t, y):
return torch.sum((y - sol[2])).real
if (method in FIXED_METHODS):
options = {'step_size': 0.01, 'interp': 'cubic'}
else:
options = {}
(t, y) = torchdiffeq.odeint(f, y0, t_points[0:2], event_fn=event_fn, method=method, options=options)
y = y[(- 1)]
self.assertLess(rel_error(sol[2], y), tol)
self.assertLess(rel_error(t_points[2], t), tol)
def test_adjoint(self):
(f, y0, t_points, sol) = construct_problem(device='cpu', ode='constant')
def event_fn(t, y):
return torch.sum((y - sol[(- 1)]))
(t, y) = torchdiffeq.odeint_adjoint(f, y0, t_points[0:2], event_fn=event_fn, method='dopri5')
y = y[(- 1)]
self.assertLess(rel_error(sol[(- 1)], y), 0.0001)
self.assertLess(rel_error(t_points[(- 1)], t), 0.0001)
t.backward(retain_graph=True)
y.sum().backward()
|
def max_abs(tensor):
return torch.max(torch.abs(tensor))
|
class TestGradient(unittest.TestCase):
def test_odeint(self):
for device in DEVICES:
for method in METHODS:
if (method == 'scipy_solver'):
continue
with self.subTest(device=device, method=method):
(f, y0, t_points, _) = construct_problem(device=device)
func = (lambda y0, t_points: torchdiffeq.odeint(f, y0, t_points, method=method))
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
def test_adjoint(self):
for device in DEVICES:
for method in METHODS:
with self.subTest(device=device, method=method):
(f, y0, t_points, _) = construct_problem(device=device)
func = (lambda y0, t_points: torchdiffeq.odeint_adjoint(f, y0, t_points, method=method))
self.assertTrue(torch.autograd.gradcheck(func, (y0, t_points)))
def test_adjoint_against_odeint(self):
'\n Test against dopri5\n '
for device in DEVICES:
for ode in PROBLEMS:
for t_grad in (True, False):
if (ode == 'constant'):
eps = 1e-12
elif (ode == 'linear'):
eps = 1e-05
elif (ode == 'sine'):
eps = 0.005
else:
raise RuntimeError
with self.subTest(device=device, ode=ode, t_grad=t_grad):
(f, y0, t_points, _) = construct_problem(device=device, ode=ode)
t_points.requires_grad_(t_grad)
ys = torchdiffeq.odeint(f, y0, t_points, rtol=1e-09, atol=1e-12)
torch.manual_seed(0)
gradys = torch.rand_like(ys)
ys.backward(gradys)
reg_y0_grad = y0.grad.clone()
reg_t_grad = (t_points.grad.clone() if t_grad else None)
reg_params_grads = []
for param in f.parameters():
reg_params_grads.append(param.grad.clone())
y0.grad.zero_()
if t_grad:
t_points.grad.zero_()
for param in f.parameters():
param.grad.zero_()
ys = torchdiffeq.odeint_adjoint(f, y0, t_points, rtol=1e-09, atol=1e-12)
ys.backward(gradys)
adj_y0_grad = y0.grad
adj_t_grad = (t_points.grad if t_grad else None)
adj_params_grads = []
for param in f.parameters():
adj_params_grads.append(param.grad)
self.assertLess(max_abs((reg_y0_grad - adj_y0_grad)), eps)
if t_grad:
self.assertLess(max_abs((reg_t_grad - adj_t_grad)), eps)
for (reg_grad, adj_grad) in zip(reg_params_grads, adj_params_grads):
self.assertLess(max_abs((reg_grad - adj_grad)), eps)
|
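# Compares adjoint-method gradients against direct backpropagation through dopri5,
# and checks that parameters of an unused submodule receive zero gradient.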
class TestCompareAdjointGradient(unittest.TestCase):
def problem(self, device):
class Odefunc(torch.nn.Module):
def __init__(self):
super(Odefunc, self).__init__()
self.A = torch.nn.Parameter(torch.tensor([[(- 0.1), 2.0], [(- 2.0), (- 0.1)]]))
self.unused_module = torch.nn.Linear(2, 5)
def forward(self, t, y):
return torch.mm((y ** 3), self.A)
y0 = torch.tensor([[2.0, 0.0]], device=device, requires_grad=True)
t_points = torch.linspace(0.0, 25.0, 10, device=device, requires_grad=True)
func = Odefunc().to(device)
return (func, y0, t_points)
def test_against_dopri5(self):
method_eps = {'dopri5': (0.0003, 0.0001, 0.002), 'scipy_solver': (0.0003, 0.0001, 0.002)}
for device in DEVICES:
for (method, eps) in method_eps.items():
for t_grad in (True, False):
with self.subTest(device=device, method=method):
(func, y0, t_points) = self.problem(device=device)
t_points.requires_grad_(t_grad)
ys = torchdiffeq.odeint_adjoint(func, y0, t_points, method=method)
gradys = (torch.rand_like(ys) * 0.1)
ys.backward(gradys)
adj_y0_grad = y0.grad
adj_t_grad = (t_points.grad if t_grad else None)
adj_A_grad = func.A.grad
self.assertEqual(max_abs(func.unused_module.weight.grad), 0)
self.assertEqual(max_abs(func.unused_module.bias.grad), 0)
(func, y0, t_points) = self.problem(device=device)
ys = torchdiffeq.odeint(func, y0, t_points, method='dopri5')
ys.backward(gradys)
self.assertLess(max_abs((y0.grad - adj_y0_grad)), eps[0])
if t_grad:
self.assertLess(max_abs((t_points.grad - adj_t_grad)), eps[1])
self.assertLess(max_abs((func.A.grad - adj_A_grad)), eps[2])
|
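# Context manager that temporarily seeds torch's RNG and restores the previous
# CPU RNG state on exit, so weight initialization below is reproducible.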
@contextlib.contextmanager
def random_seed_torch(seed):
cpu_rng_state = torch.get_rng_state()
torch.manual_seed(seed)
try:
(yield)
finally:
torch.set_rng_state(cpu_rng_state)
|
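# Small MLP vector field used in the tests; when 'oscillate' is set, the output
# is modulated by sin(2t) to make the dynamics time-dependent.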
class _NeuralF(torch.nn.Module):
def __init__(self, width, oscillate):
super(_NeuralF, self).__init__()
with random_seed_torch(0):
self.linears = torch.nn.Sequential(torch.nn.Linear(2, width), torch.nn.Tanh(), torch.nn.Linear(width, 2), torch.nn.Tanh())
self.nfe = 0
self.oscillate = oscillate
def forward(self, t, x):
self.nfe += 1
out = self.linears(x)
if self.oscillate:
out = (out * t.mul(2).sin())
return out
|