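# ---------------------------------------------------------------------------
# Assumed imports for the snippets collected below (an HSNet few-shot
# segmentation codebase followed by several EPO-patent/Elasticsearch scripts).
# The import list is inferred from usage, not taken from the original dump.
# Module-level names such as consumer_key, consumer_secret_key, token_url,
# request_url, INDEX_APPL, INDEX_CIT, es, logger, dataset, column_id,
# column_equivalents, counter_error and counter_success, as well as the HSNet
# helpers (utils, AverageMeter, Evaluator, Visualizer, Conv4d), are defined
# elsewhere in their source projects. The news-indexing snippet originally
# assumed `from datetime import datetime`; it is written here against
# `import datetime` for consistency with the Logger snippet.
# ---------------------------------------------------------------------------
import argparse
import base64
import datetime
import glob
import json
import logging
import os
import pickle
import random
import time
import xml.etree.ElementTree as ET

import nltk
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
import torch.nn.functional as F
from elasticsearch import Elasticsearch, helpers
from elasticsearch_dsl import connections
from PIL import Image
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms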
class Logger:
    """ Writes evaluation results of training/testing """

    @classmethod
    def initialize(cls, args, training):
        logtime = datetime.datetime.now().__format__('_%m%d_%H%M%S')
        logpath = args.logpath if training else '_TEST_' + args.load.split('/')[-2].split('.')[0] + logtime
        if logpath == '':
            logpath = logtime

        cls.logpath = os.path.join('logs', logpath + '.log')
        cls.benchmark = args.benchmark
        os.makedirs(cls.logpath)

        logging.basicConfig(filemode='w',
                            filename=os.path.join(cls.logpath, 'log.txt'),
                            level=logging.INFO,
                            format='%(message)s',
                            datefmt='%m-%d %H:%M:%S')

        # Console log config
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)

        # Tensorboard writer
        cls.tbd_writer = SummaryWriter(os.path.join(cls.logpath, 'tbd/runs'))

        # Log arguments
        logging.info('\n:=========== Few-shot Seg. with HSNet ===========')
        for arg_key in args.__dict__:
            logging.info('| %20s: %-24s' % (arg_key, str(args.__dict__[arg_key])))
        logging.info(':================================================\n')

    @classmethod
    def info(cls, msg):
        """ Writes log message to log.txt """
        logging.info(msg)

    @classmethod
    def save_model_miou(cls, model, epoch, val_miou):
        torch.save(model.state_dict(), os.path.join(cls.logpath, 'best_model.pt'))
        cls.info('Model saved @%d w/ val. mIoU: %5.2f.\n' % (epoch, val_miou))

    @classmethod
    def log_params(cls, model):
        backbone_param = 0
        learner_param = 0
        for k in model.state_dict().keys():
            n_param = model.state_dict()[k].view(-1).size(0)
            if k.split('.')[0] in 'backbone':
                if k.split('.')[1] in ['classifier', 'fc']:
                    continue
                backbone_param += n_param
            else:
                learner_param += n_param
        Logger.info('Backbone # param.: %d' % backbone_param)
        Logger.info('Learnable # param.: %d' % learner_param)
        Logger.info('Total # param.: %d' % (backbone_param + learner_param))
def fix_randseed(seed):
    """ Set random seeds for reproducibility """
    if seed is None:
        seed = int(random.random() * 100000.0)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def mean(x):
    return sum(x) / len(x) if len(x) > 0 else 0.0
def to_cuda(batch):
    for key, value in batch.items():
        if isinstance(value, torch.Tensor):
            batch[key] = value.cuda()
    return batch
def to_cpu(tensor):
    return tensor.detach().clone().cpu()
class DatasetCOCO(Dataset):
    def __init__(self, datapath, fold, transform, split, shot, use_original_imgsize):
        self.split = 'val' if split in ['val', 'test'] else 'trn'
        self.fold = fold
        self.nfolds = 4
        self.nclass = 80
        self.benchmark = 'coco'
        self.shot = shot
        self.split_coco = split if split == 'val2014' else 'train2014'
        self.base_path = os.path.join(datapath, 'COCO2014')
        self.transform = transform
        self.use_original_imgsize = use_original_imgsize

        self.class_ids = self.build_class_ids()
        self.img_metadata_classwise = self.build_img_metadata_classwise()
        self.img_metadata = self.build_img_metadata()

    def __len__(self):
        return len(self.img_metadata) if self.split == 'trn' else 1000

    def __getitem__(self, idx):
        # idx is ignored: each episode is sampled randomly in load_frame()
        query_img, query_mask, support_imgs, support_masks, query_name, \
            support_names, class_sample, org_qry_imsize = self.load_frame()

        query_img = self.transform(query_img)
        query_mask = query_mask.float()
        if not self.use_original_imgsize:
            query_mask = F.interpolate(query_mask.unsqueeze(0).unsqueeze(0).float(),
                                       query_img.size()[-2:], mode='nearest').squeeze()

        support_imgs = torch.stack([self.transform(support_img) for support_img in support_imgs])
        for midx, smask in enumerate(support_masks):
            support_masks[midx] = F.interpolate(smask.unsqueeze(0).unsqueeze(0).float(),
                                                support_imgs.size()[-2:], mode='nearest').squeeze()
        support_masks = torch.stack(support_masks)

        batch = {'query_img': query_img,
                 'query_mask': query_mask,
                 'query_name': query_name,
                 'org_query_imsize': org_qry_imsize,
                 'support_imgs': support_imgs,
                 'support_masks': support_masks,
                 'support_names': support_names,
                 'class_id': torch.tensor(class_sample)}

        return batch

    def build_class_ids(self):
        nclass_trn = self.nclass // self.nfolds
        class_ids_val = [self.fold + self.nfolds * v for v in range(nclass_trn)]
        class_ids_trn = [x for x in range(self.nclass) if x not in class_ids_val]
        class_ids = class_ids_trn if self.split == 'trn' else class_ids_val
        return class_ids

    def build_img_metadata_classwise(self):
        with open('./data/splits/coco/%s/fold%d.pkl' % (self.split, self.fold), 'rb') as f:
            img_metadata_classwise = pickle.load(f)
        return img_metadata_classwise

    def build_img_metadata(self):
        img_metadata = []
        for k in self.img_metadata_classwise.keys():
            img_metadata += self.img_metadata_classwise[k]
        return sorted(list(set(img_metadata)))

    def read_mask(self, name):
        mask_path = os.path.join(self.base_path, 'annotations', name)
        mask = torch.tensor(np.array(Image.open(mask_path[:mask_path.index('.jpg')] + '.png')))
        return mask

    def load_frame(self):
        class_sample = np.random.choice(self.class_ids, 1, replace=False)[0]
        query_name = np.random.choice(self.img_metadata_classwise[class_sample], 1, replace=False)[0]
        query_img = Image.open(os.path.join(self.base_path, query_name)).convert('RGB')
        query_mask = self.read_mask(query_name)

        org_qry_imsize = query_img.size

        # Binarize the class mask: 1 for the sampled class, 0 elsewhere
        query_mask[query_mask != class_sample + 1] = 0
        query_mask[query_mask == class_sample + 1] = 1

        support_names = []
        while True:  # keep sampling until the support set excludes the query
            support_name = np.random.choice(self.img_metadata_classwise[class_sample], 1, replace=False)[0]
            if query_name != support_name:
                support_names.append(support_name)
            if len(support_names) == self.shot:
                break

        support_imgs = []
        support_masks = []
        for support_name in support_names:
            support_imgs.append(Image.open(os.path.join(self.base_path, support_name)).convert('RGB'))
            support_mask = self.read_mask(support_name)
            support_mask[support_mask != class_sample + 1] = 0
            support_mask[support_mask == class_sample + 1] = 1
            support_masks.append(support_mask)

        return query_img, query_mask, support_imgs, support_masks, \
            query_name, support_names, class_sample, org_qry_imsize
class FSSDataset:

    @classmethod
    def initialize(cls, img_size, datapath, use_original_imgsize):
        cls.datasets = {'pascal': DatasetPASCAL,
                        'coco': DatasetCOCO,
                        'fss': DatasetFSS}

        cls.img_mean = [0.485, 0.456, 0.406]
        cls.img_std = [0.229, 0.224, 0.225]
        cls.datapath = datapath
        cls.use_original_imgsize = use_original_imgsize

        cls.transform = transforms.Compose([transforms.Resize(size=(img_size, img_size)),
                                            transforms.ToTensor(),
                                            transforms.Normalize(cls.img_mean, cls.img_std)])

    @classmethod
    def build_dataloader(cls, benchmark, bsz, nworker, fold, split, shot=1):
        shuffle = split == 'trn'
        nworker = nworker if split == 'trn' else 0

        dataset = cls.datasets[benchmark](cls.datapath, fold=fold, transform=cls.transform,
                                          split=split, shot=shot,
                                          use_original_imgsize=cls.use_original_imgsize)
        dataloader = DataLoader(dataset, batch_size=bsz, shuffle=shuffle, num_workers=nworker)

        return dataloader
class DatasetFSS(Dataset):
    def __init__(self, datapath, fold, transform, split, shot, use_original_imgsize):
        self.split = split
        self.benchmark = 'fss'
        self.shot = shot

        self.base_path = os.path.join(datapath, 'FSS-1000')

        with open('./data/splits/fss/%s.txt' % split, 'r') as f:
            self.categories = f.read().split('\n')[:-1]
        self.categories = sorted(self.categories)

        self.class_ids = self.build_class_ids()
        self.img_metadata = self.build_img_metadata()

        self.transform = transform

    def __len__(self):
        return len(self.img_metadata)

    def __getitem__(self, idx):
        query_name, support_names, class_sample = self.sample_episode(idx)
        query_img, query_mask, support_imgs, support_masks = self.load_frame(query_name, support_names)

        query_img = self.transform(query_img)
        query_mask = F.interpolate(query_mask.unsqueeze(0).unsqueeze(0).float(),
                                   query_img.size()[-2:], mode='nearest').squeeze()

        support_imgs = torch.stack([self.transform(support_img) for support_img in support_imgs])

        support_masks_tmp = []
        for smask in support_masks:
            smask = F.interpolate(smask.unsqueeze(0).unsqueeze(0).float(),
                                  support_imgs.size()[-2:], mode='nearest').squeeze()
            support_masks_tmp.append(smask)
        support_masks = torch.stack(support_masks_tmp)

        batch = {'query_img': query_img,
                 'query_mask': query_mask,
                 'query_name': query_name,
                 'support_imgs': support_imgs,
                 'support_masks': support_masks,
                 'support_names': support_names,
                 'class_id': torch.tensor(class_sample)}

        return batch

    def load_frame(self, query_name, support_names):
        query_img = Image.open(query_name).convert('RGB')
        support_imgs = [Image.open(name).convert('RGB') for name in support_names]

        query_id = query_name.split('/')[-1].split('.')[0]
        query_name = os.path.join(os.path.dirname(query_name), query_id) + '.png'
        support_ids = [name.split('/')[-1].split('.')[0] for name in support_names]
        support_names = [os.path.join(os.path.dirname(name), sid) + '.png'
                         for name, sid in zip(support_names, support_ids)]

        query_mask = self.read_mask(query_name)
        support_masks = [self.read_mask(name) for name in support_names]

        return query_img, query_mask, support_imgs, support_masks

    def read_mask(self, img_name):
        mask = torch.tensor(np.array(Image.open(img_name).convert('L')))
        mask[mask < 128] = 0
        mask[mask >= 128] = 1
        return mask

    def sample_episode(self, idx):
        query_name = self.img_metadata[idx]
        class_sample = self.categories.index(query_name.split('/')[-2])
        if self.split == 'val':
            class_sample += 520
        elif self.split == 'test':
            class_sample += 760

        support_names = []
        while True:  # keep sampling until the support set excludes the query
            support_name = np.random.choice(range(1, 11), 1, replace=False)[0]
            support_name = os.path.join(os.path.dirname(query_name), str(support_name)) + '.jpg'
            if query_name != support_name:
                support_names.append(support_name)
            if len(support_names) == self.shot:
                break

        return query_name, support_names, class_sample

    def build_class_ids(self):
        if self.split == 'trn':
            class_ids = range(0, 520)
        elif self.split == 'val':
            class_ids = range(520, 760)
        elif self.split == 'test':
            class_ids = range(760, 1000)
        return class_ids

    def build_img_metadata(self):
        img_metadata = []
        for cat in self.categories:
            img_paths = sorted([path for path in glob.glob('%s/*' % os.path.join(self.base_path, cat))])
            for img_path in img_paths:
                if os.path.basename(img_path).split('.')[1] == 'jpg':
                    img_metadata.append(img_path)
        return img_metadata
class DatasetPASCAL(Dataset):
    def __init__(self, datapath, fold, transform, split, shot, use_original_imgsize):
        self.split = 'val' if split in ['val', 'test'] else 'trn'
        self.fold = fold
        self.nfolds = 4
        self.nclass = 20
        self.benchmark = 'pascal'
        self.shot = shot
        self.use_original_imgsize = use_original_imgsize

        self.img_path = os.path.join(datapath, 'VOC2012/JPEGImages/')
        self.ann_path = os.path.join(datapath, 'VOC2012/SegmentationClassAug/')
        self.transform = transform

        self.class_ids = self.build_class_ids()
        self.img_metadata = self.build_img_metadata()
        self.img_metadata_classwise = self.build_img_metadata_classwise()

    def __len__(self):
        return len(self.img_metadata) if self.split == 'trn' else 1000

    def __getitem__(self, idx):
        idx %= len(self.img_metadata)  # for validation, as n_images may be < 1000
        query_name, support_names, class_sample = self.sample_episode(idx)
        query_img, query_cmask, support_imgs, support_cmasks, org_qry_imsize = \
            self.load_frame(query_name, support_names)

        query_img = self.transform(query_img)
        if not self.use_original_imgsize:
            query_cmask = F.interpolate(query_cmask.unsqueeze(0).unsqueeze(0).float(),
                                        query_img.size()[-2:], mode='nearest').squeeze()
        query_mask, query_ignore_idx = self.extract_ignore_idx(query_cmask.float(), class_sample)

        support_imgs = torch.stack([self.transform(support_img) for support_img in support_imgs])

        support_masks = []
        support_ignore_idxs = []
        for scmask in support_cmasks:
            scmask = F.interpolate(scmask.unsqueeze(0).unsqueeze(0).float(),
                                   support_imgs.size()[-2:], mode='nearest').squeeze()
            support_mask, support_ignore_idx = self.extract_ignore_idx(scmask, class_sample)
            support_masks.append(support_mask)
            support_ignore_idxs.append(support_ignore_idx)
        support_masks = torch.stack(support_masks)
        support_ignore_idxs = torch.stack(support_ignore_idxs)

        batch = {'query_img': query_img,
                 'query_mask': query_mask,
                 'query_name': query_name,
                 'query_ignore_idx': query_ignore_idx,
                 'org_query_imsize': org_qry_imsize,
                 'support_imgs': support_imgs,
                 'support_masks': support_masks,
                 'support_names': support_names,
                 'support_ignore_idxs': support_ignore_idxs,
                 'class_id': torch.tensor(class_sample)}

        return batch

    def extract_ignore_idx(self, mask, class_id):
        boundary = (mask / 255).floor()
        mask[mask != class_id + 1] = 0
        mask[mask == class_id + 1] = 1
        return mask, boundary

    def load_frame(self, query_name, support_names):
        query_img = self.read_img(query_name)
        query_mask = self.read_mask(query_name)
        support_imgs = [self.read_img(name) for name in support_names]
        support_masks = [self.read_mask(name) for name in support_names]

        org_qry_imsize = query_img.size

        return query_img, query_mask, support_imgs, support_masks, org_qry_imsize

    def read_mask(self, img_name):
        """ Return segmentation mask in PIL Image """
        mask = torch.tensor(np.array(Image.open(os.path.join(self.ann_path, img_name) + '.png')))
        return mask

    def read_img(self, img_name):
        """ Return RGB image in PIL Image """
        return Image.open(os.path.join(self.img_path, img_name) + '.jpg')

    def sample_episode(self, idx):
        query_name, class_sample = self.img_metadata[idx]

        support_names = []
        while True:  # keep sampling until the support set excludes the query
            support_name = np.random.choice(self.img_metadata_classwise[class_sample], 1, replace=False)[0]
            if query_name != support_name:
                support_names.append(support_name)
            if len(support_names) == self.shot:
                break

        return query_name, support_names, class_sample

    def build_class_ids(self):
        nclass_trn = self.nclass // self.nfolds
        class_ids_val = [self.fold * nclass_trn + i for i in range(nclass_trn)]
        class_ids_trn = [x for x in range(self.nclass) if x not in class_ids_val]

        if self.split == 'trn':
            return class_ids_trn
        else:
            return class_ids_val

    def build_img_metadata(self):

        def read_metadata(split, fold_id):
            fold_n_metadata = os.path.join('data/splits/pascal/%s/fold%d.txt' % (split, fold_id))
            with open(fold_n_metadata, 'r') as f:
                fold_n_metadata = f.read().split('\n')[:-1]
            fold_n_metadata = [[data.split('__')[0], int(data.split('__')[1]) - 1]
                               for data in fold_n_metadata]
            return fold_n_metadata

        img_metadata = []
        if self.split == 'trn':  # for training, read metadata of all folds but the current one
            for fold_id in range(self.nfolds):
                if fold_id == self.fold:  # skip the validation fold
                    continue
                img_metadata += read_metadata(self.split, fold_id)
        elif self.split == 'val':  # for validation, read metadata of the current fold
            img_metadata = read_metadata(self.split, self.fold)
        else:
            raise Exception('Undefined split %s: ' % self.split)

        print('Total (%s) images are : %d' % (self.split, len(img_metadata)))

        return img_metadata

    def build_img_metadata_classwise(self):
        img_metadata_classwise = {}
        for class_id in range(self.nclass):
            img_metadata_classwise[class_id] = []

        for img_name, img_class in self.img_metadata:
            img_metadata_classwise[img_class] += [img_name]
        return img_metadata_classwise
class CenterPivotConv4d(nn.Module):
    """ CenterPivot 4D conv """
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True):
        super(CenterPivotConv4d, self).__init__()

        # conv1 operates on the query-side (first two) spatial dims,
        # conv2 on the support-side (last two) spatial dims
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size[:2], stride=stride[:2],
                               bias=bias, padding=padding[:2])
        self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size[2:], stride=stride[2:],
                               bias=bias, padding=padding[2:])

        self.stride34 = stride[2:]
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.idx_initialized = False

    def prune(self, ct):
        # Subsample the support-side dims according to the support-side stride
        bsz, ch, ha, wa, hb, wb = ct.size()
        if not self.idx_initialized:
            idxh = torch.arange(start=0, end=hb, step=self.stride[2:][0], device=ct.device)
            idxw = torch.arange(start=0, end=wb, step=self.stride[2:][1], device=ct.device)
            self.len_h = len(idxh)
            self.len_w = len(idxw)
            self.idx = (idxw.repeat(self.len_h, 1) + idxh.repeat(self.len_w, 1).t() * wb).view(-1)
            self.idx_initialized = True
        ct_pruned = ct.view(bsz, ch, ha, wa, -1).index_select(4, self.idx) \
                      .view(bsz, ch, ha, wa, self.len_h, self.len_w)

        return ct_pruned

    def forward(self, x):
        if self.stride[2:][-1] > 1:
            out1 = self.prune(x)
        else:
            out1 = x
        bsz, inch, ha, wa, hb, wb = out1.size()
        out1 = out1.permute(0, 4, 5, 1, 2, 3).contiguous().view(-1, inch, ha, wa)
        out1 = self.conv1(out1)
        outch, o_ha, o_wa = out1.size(-3), out1.size(-2), out1.size(-1)
        out1 = out1.view(bsz, hb, wb, outch, o_ha, o_wa).permute(0, 3, 4, 5, 1, 2).contiguous()

        bsz, inch, ha, wa, hb, wb = x.size()
        out2 = x.permute(0, 2, 3, 1, 4, 5).contiguous().view(-1, inch, hb, wb)
        out2 = self.conv2(out2)
        outch, o_hb, o_wb = out2.size(-3), out2.size(-2), out2.size(-1)
        out2 = out2.view(bsz, ha, wa, outch, o_hb, o_wb).permute(0, 3, 1, 2, 4, 5).contiguous()

        if out1.size()[-2:] != out2.size()[-2:] and self.padding[-2:] == (0, 0):
            out1 = out1.view(bsz, outch, o_ha, o_wa, -1).sum(dim=-1)
            out2 = out2.squeeze()

        y = out1 + out2
        return y
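# Minimal shape-check sketch for CenterPivotConv4d (illustrative, not from the
# original dump): a 6-D correlation tensor (bsz, ch, ha, wa, hb, wb) stays 6-D,
# with the support-side dims (hb, wb) halved by the stride (1, 1, 2, 2).
if __name__ == '__main__':
    conv4d = CenterPivotConv4d(in_channels=3, out_channels=8,
                               kernel_size=(3, 3, 3, 3),
                               stride=(1, 1, 2, 2),
                               padding=(1, 1, 1, 1))
    corr = torch.randn(1, 3, 8, 8, 8, 8)
    print(conv4d(corr).shape)  # torch.Size([1, 8, 8, 8, 4, 4])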
class Correlation:

    @classmethod
    def multilayer_correlation(cls, query_feats, support_feats, stack_ids):
        eps = 1e-5

        corrs = []
        for idx, (query_feat, support_feat) in enumerate(zip(query_feats, support_feats)):
            # L2-normalize the channel dimension, then cosine similarity via bmm
            bsz, ch, hb, wb = support_feat.size()
            support_feat = support_feat.view(bsz, ch, -1)
            support_feat = support_feat / (support_feat.norm(dim=1, p=2, keepdim=True) + eps)

            bsz, ch, ha, wa = query_feat.size()
            query_feat = query_feat.view(bsz, ch, -1)
            query_feat = query_feat / (query_feat.norm(dim=1, p=2, keepdim=True) + eps)

            corr = torch.bmm(query_feat.transpose(1, 2), support_feat).view(bsz, ha, wa, hb, wb)
            corr = corr.clamp(min=0)
            corrs.append(corr)

        # Group the per-layer correlations by backbone stage (layer4 / layer3 / layer2)
        corr_l4 = torch.stack(corrs[-stack_ids[0]:]).transpose(0, 1).contiguous()
        corr_l3 = torch.stack(corrs[-stack_ids[1]:-stack_ids[0]]).transpose(0, 1).contiguous()
        corr_l2 = torch.stack(corrs[-stack_ids[2]:-stack_ids[1]]).transpose(0, 1).contiguous()

        return [corr_l4, corr_l3, corr_l2]
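# Minimal sketch of multilayer_correlation with assumed feature shapes (three
# backbone layers, batch size 2); stack_ids groups the per-layer correlation
# tensors into the layer-4/3/2 pyramid consumed by HPNLearner below.
if __name__ == '__main__':
    qf = [torch.randn(2, 64, 13, 13), torch.randn(2, 128, 13, 13), torch.randn(2, 256, 7, 7)]
    sf = [torch.randn(2, 64, 13, 13), torch.randn(2, 128, 13, 13), torch.randn(2, 256, 7, 7)]
    pyramid = Correlation.multilayer_correlation(qf, sf, stack_ids=[1, 2, 3])
    for corr in pyramid:
        print(corr.shape)
    # torch.Size([2, 1, 7, 7, 7, 7])
    # torch.Size([2, 1, 13, 13, 13, 13])
    # torch.Size([2, 1, 13, 13, 13, 13])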
def extract_feat_vgg(img, backbone, feat_ids, bottleneck_ids=None, lids=None):
    """ Extract intermediate features from VGG """
    feats = []
    feat = img
    for lid, module in enumerate(backbone.features):
        feat = module(feat)
        if lid in feat_ids:
            feats.append(feat.clone())
    return feats
def extract_feat_res(img, backbone, feat_ids, bottleneck_ids, lids):
    """ Extract intermediate features from ResNet """
    feats = []

    # Stem
    feat = backbone.conv1.forward(img)
    feat = backbone.bn1.forward(feat)
    feat = backbone.relu.forward(feat)
    feat = backbone.maxpool.forward(feat)

    # Bottleneck blocks of layer1..layer4, unrolled so that the pre-ReLU
    # residual output of each block can be captured
    for hid, (bid, lid) in enumerate(zip(bottleneck_ids, lids)):
        res = feat
        feat = backbone.__getattr__('layer%d' % lid)[bid].conv1.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].bn1.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].relu.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].conv2.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].bn2.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].relu.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].conv3.forward(feat)
        feat = backbone.__getattr__('layer%d' % lid)[bid].bn3.forward(feat)

        if bid == 0:
            res = backbone.__getattr__('layer%d' % lid)[bid].downsample.forward(res)

        feat += res

        if hid + 1 in feat_ids:
            feats.append(feat.clone())

        feat = backbone.__getattr__('layer%d' % lid)[bid].relu.forward(feat)

    return feats
class HPNLearner(nn.Module):
    def __init__(self, inch):
        super(HPNLearner, self).__init__()

        def make_building_block(in_channel, out_channels, kernel_sizes, spt_strides, group=4):
            assert len(out_channels) == len(kernel_sizes) == len(spt_strides)

            building_block_layers = []
            for idx, (outch, ksz, stride) in enumerate(zip(out_channels, kernel_sizes, spt_strides)):
                inch = in_channel if idx == 0 else out_channels[idx - 1]
                ksz4d = (ksz,) * 4
                str4d = (1, 1) + (stride,) * 2
                pad4d = (ksz // 2,) * 4

                # Conv4d is CenterPivotConv4d, imported under an alias in the original project
                building_block_layers.append(Conv4d(inch, outch, ksz4d, str4d, pad4d))
                building_block_layers.append(nn.GroupNorm(group, outch))
                building_block_layers.append(nn.ReLU(inplace=True))

            return nn.Sequential(*building_block_layers)

        outch1, outch2, outch3 = 16, 64, 128

        # Squeezing building blocks
        self.encoder_layer4 = make_building_block(inch[0], [outch1, outch2, outch3], [3, 3, 3], [2, 2, 2])
        self.encoder_layer3 = make_building_block(inch[1], [outch1, outch2, outch3], [5, 3, 3], [4, 2, 2])
        self.encoder_layer2 = make_building_block(inch[2], [outch1, outch2, outch3], [5, 5, 3], [4, 4, 2])

        # Mixing building blocks
        self.encoder_layer4to3 = make_building_block(outch3, [outch3, outch3, outch3], [3, 3, 3], [1, 1, 1])
        self.encoder_layer3to2 = make_building_block(outch3, [outch3, outch3, outch3], [3, 3, 3], [1, 1, 1])

        # Decoder layers
        self.decoder1 = nn.Sequential(nn.Conv2d(outch3, outch3, (3, 3), padding=(1, 1), bias=True),
                                      nn.ReLU(),
                                      nn.Conv2d(outch3, outch2, (3, 3), padding=(1, 1), bias=True),
                                      nn.ReLU())

        self.decoder2 = nn.Sequential(nn.Conv2d(outch2, outch2, (3, 3), padding=(1, 1), bias=True),
                                      nn.ReLU(),
                                      nn.Conv2d(outch2, 2, (3, 3), padding=(1, 1), bias=True))

    def interpolate_support_dims(self, hypercorr, spatial_size=None):
        bsz, ch, ha, wa, hb, wb = hypercorr.size()
        hypercorr = hypercorr.permute(0, 4, 5, 1, 2, 3).contiguous().view(bsz * hb * wb, ch, ha, wa)
        hypercorr = F.interpolate(hypercorr, spatial_size, mode='bilinear', align_corners=True)
        o_hb, o_wb = spatial_size
        hypercorr = hypercorr.view(bsz, hb, wb, ch, o_hb, o_wb).permute(0, 3, 4, 5, 1, 2).contiguous()
        return hypercorr

    def forward(self, hypercorr_pyramid):
        # Encode hypercorrelations from each layer (squeezing building blocks)
        hypercorr_sqz4 = self.encoder_layer4(hypercorr_pyramid[0])
        hypercorr_sqz3 = self.encoder_layer3(hypercorr_pyramid[1])
        hypercorr_sqz2 = self.encoder_layer2(hypercorr_pyramid[2])

        # Propagate the encoded 4D tensors top-down (mixing building blocks)
        hypercorr_sqz4 = self.interpolate_support_dims(hypercorr_sqz4, hypercorr_sqz3.size()[-4:-2])
        hypercorr_mix43 = hypercorr_sqz4 + hypercorr_sqz3
        hypercorr_mix43 = self.encoder_layer4to3(hypercorr_mix43)

        hypercorr_mix43 = self.interpolate_support_dims(hypercorr_mix43, hypercorr_sqz2.size()[-4:-2])
        hypercorr_mix432 = hypercorr_mix43 + hypercorr_sqz2
        hypercorr_mix432 = self.encoder_layer3to2(hypercorr_mix432)

        # Collapse the support-side spatial dims by average pooling
        bsz, ch, ha, wa, hb, wb = hypercorr_mix432.size()
        hypercorr_encoded = hypercorr_mix432.view(bsz, ch, ha, wa, -1).mean(dim=-1)

        # Decode the encoded 2D map into a 2-channel segmentation logit mask
        hypercorr_decoded = self.decoder1(hypercorr_encoded)
        upsample_size = (hypercorr_decoded.size(-1) * 2,) * 2
        hypercorr_decoded = F.interpolate(hypercorr_decoded, upsample_size,
                                          mode='bilinear', align_corners=True)
        logit_mask = self.decoder2(hypercorr_decoded)

        return logit_mask
def test(model, dataloader, nshot):
    """ Test HSNet """

    # Freeze randomness during testing for reproducibility
    utils.fix_randseed(0)
    average_meter = AverageMeter(dataloader.dataset)

    for idx, batch in enumerate(dataloader):

        # 1. Forward pass
        batch = utils.to_cuda(batch)
        pred_mask = model.module.predict_mask_nshot(batch, nshot=nshot)

        assert pred_mask.size() == batch['query_mask'].size()

        # 2. Evaluate the prediction
        area_inter, area_union = Evaluator.classify_prediction(pred_mask.clone(), batch)
        average_meter.update(area_inter, area_union, batch['class_id'], loss=None)
        average_meter.write_process(idx, len(dataloader), epoch=-1, write_batch_idx=1)

        # Visualize predictions
        if Visualizer.visualize:
            Visualizer.visualize_prediction_batch(batch['support_imgs'], batch['support_masks'],
                                                  batch['query_img'], batch['query_mask'],
                                                  pred_mask, batch['class_id'], idx,
                                                  area_inter[1].float() / area_union[1].float())

    # Write evaluation results
    average_meter.write_result('Test', 0)
    miou, fb_iou = average_meter.compute_iou()

    return miou, fb_iou
def train(epoch, model, dataloader, optimizer, training):
    """ Train HSNet """

    # Force randomness during training for diverse episodes;
    # freeze it during validation for reproducibility
    utils.fix_randseed(None) if training else utils.fix_randseed(0)
    model.module.train_mode() if training else model.module.eval()
    average_meter = AverageMeter(dataloader.dataset)

    for idx, batch in enumerate(dataloader):

        # 1. Forward pass
        batch = utils.to_cuda(batch)
        logit_mask = model(batch['query_img'], batch['support_imgs'].squeeze(1),
                           batch['support_masks'].squeeze(1))
        pred_mask = logit_mask.argmax(dim=1)

        # 2. Compute loss & update model parameters
        loss = model.module.compute_objective(logit_mask, batch['query_mask'])
        if training:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # 3. Evaluate the prediction
        area_inter, area_union = Evaluator.classify_prediction(pred_mask, batch)
        average_meter.update(area_inter, area_union, batch['class_id'], loss.detach().clone())
        average_meter.write_process(idx, len(dataloader), epoch, write_batch_idx=50)

    # Write evaluation results
    average_meter.write_result('Training' if training else 'Validation', epoch)
    avg_loss = utils.mean(average_meter.loss_buf)
    miou, fb_iou = average_meter.compute_iou()

    return avg_loss, miou, fb_iou
def parse_arguments():
    """ Parse options for functions. """
    parser = argparse.ArgumentParser(description='Tool for managing Elasticsearch indices')
    subparsers = parser.add_subparsers()

    create = subparsers.add_parser('create', help='Create Elasticsearch index')
    create.add_argument('-i', '--index', required=True, help='Name of the new index')
    create.add_argument('-m', '--mappings', type=argparse.FileType('r'), required=True,
                        help='File where the mappings configuration for the index is stored')

    delete = subparsers.add_parser('delete', help='Delete one or more Elasticsearch indices')
    delete.add_argument('-e', '--erase', nargs='*', required=True, help='Names of the indices to delete')

    index = subparsers.add_parser('index', help='Index JSON files')
    index.add_argument('-d', '--dir', required=True,
                       help='Directory where the JSON files are stored. Make sure this path points to the Spider folder!')
    index.add_argument('-l', '--location', required=True,
                       help='Name of the index where to index JSON files. If the index does not exist, it is created')
    index.add_argument('-t', '--item-type', required=True, help='Name of the type to be stored in the ES index')
    index.add_argument('-c', '--chunk-size', type=int, nargs='?', default=500,
                       help='Number of JSON lines to load into memory before indexing')

    reindex = subparsers.add_parser('reindex', help='Reindex an index')
    reindex.add_argument('-s', '--source', required=True, help='Source index where documents are stored')
    reindex.add_argument('-t', '--target', required=True, help='Target index where to move documents')

    args = parser.parse_args()
    return args
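# Example invocations, assuming this parser lives in a script called
# manage_indices.py (the script name is illustrative, not from the dump):
#
#   python manage_indices.py create -i news -m mappings.json
#   python manage_indices.py delete -e old_news stale_news
#   python manage_indices.py index -d ./spider_output -l news -t news_article -c 500
#   python manage_indices.py reindex -s news -t news_v2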
def create_index(es, index_name, body):
    if not es.indices.exists(index_name):
        es.indices.create(index=index_name, body=body)
def delete_indices(es, indices_name):
    for index in indices_name:
        if es.indices.exists(index):
            es.indices.delete(index=index)
        else:
            logger.info('Index `{}` not found'.format(index))
def reindex(es, source_index, target_index):
    helpers.reindex(es, source_index=source_index, target_index=target_index)
def lazy_indexing(es, path, chunck, index, item_type):

    def serialize_json(json_line):
        # Fields scraped as '---' are treated as missing
        to_null = ['author', 'article_tag', 'list_of_tags', 'keywords', 'news_keywords']
        for tag in to_null:
            if json_line[tag] == '---':
                json_line[tag] = None
        if json_line['publication_date'] == '---':
            json_line['publication_date'] = datetime.datetime.strptime('1900-01-01', '%Y-%m-%d')
        else:
            try:
                json_line['publication_date'] = datetime.datetime.strptime(
                    json_line['publication_date'], '%d %B %Y').date()
            except ValueError:
                try:
                    json_line['publication_date'] = datetime.datetime.strptime(
                        json_line['publication_date'].replace('T', ' '), '%Y-%m-%d %H:%S')
                except ValueError:
                    pass  # leave the raw string if neither format matches
        return json_line

    def lazy_json_load(filename):
        # Stream one bulk action per JSON line instead of loading the whole file
        with open(filename) as infile:
            for line in infile:
                json_line = json.loads(line)
                formatted_json_line = serialize_json(json_line)
                index_action = {
                    '_index': index,
                    '_type': item_type,
                    '_id': formatted_json_line['url'],
                    '_source': formatted_json_line,
                }
                yield index_action

    files = [file for file in glob.glob(path + '/**/*.json', recursive=True)
             if 'active.json' not in file.split('/')]
    logger.info('Found {0} documents to index'.format(len(files)))
    for filename in files:
        logger.info('Indexing : {}'.format(filename))
        helpers.bulk(client=es, chunk_size=chunck, actions=lazy_json_load(filename),
                     index=index, doc_type='news_article', stats_only=True)
def groupByQuery(eintrag, eintrag_spalte):
    # German identifiers kept as-is: eintrag = entry value, eintrag_spalte = column name;
    # `dataset` is a module-level DataFrame defined elsewhere
    return dataset.groupby(eintrag_spalte).get_group(eintrag)
def gendata(records, index, type):
    # Generator of Elasticsearch bulk index actions, one per record
    for k, v in records.items():
        yield {'_index': index, '_id': k, '_source': v}
def extract_classifications(line):
    """ Extract IPCR classification codes from the <classifications-ipcr> block of a line. """
    classifications_list = []
    start_classification = line.find('<classifications-ipcr>')
    relative_end_classification = line[start_classification:].find('</classifications-ipcr>') \
        + len('</classifications-ipcr>')
    classification_string = line[start_classification:start_classification + relative_end_classification]
    try:
        treeRoot = ET.fromstring(classification_string)
        for classifications in treeRoot.findall('classification-ipcr'):
            for classification in classifications:
                classifications_list.append(classification.text)
    except ET.ParseError:
        print('error classification for line: ' + classification_string)
    return classifications_list
def extract_citationIDs(application_identifier, line):
    """ Collect '<application_id>_<nnnn>' ids for every 'sr-cit' token in the search-report column. """
    words = line.split('\t')[6].split(' ')
    indices = [i for i, x in enumerate(words) if 'sr-cit' in x]
    return [application_identifier + '_' + words[i][words[i].find('sr-cit') + 6:words[i].find('sr-cit') + 10]
            for i in indices]
def normalize_claims(claims):
    normalized_claims = []
    for claim in claims.split(','):
        if '-' not in claim:
            normalized_claims.append(int(claim))
        else:
            for number in range(int(claim.split('-')[0]), int(claim.split('-')[1]) + 1):
                normalized_claims.append(number)
    return normalized_claims
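# Quick check of normalize_claims (illustrative):
#   normalize_claims('1,3-5')  -> [1, 3, 4, 5]
#   normalize_claims('2')      -> [2]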
def extract_citation_entry(citation_id, searchreport_line):
    """ Parse one <citation> element of a search report into a flat dict. """
    citation = {}
    start_citation = searchreport_line.find('<citation id="sr-cit' + citation_id[-4:])
    relative_end_citation = searchreport_line[start_citation:].find('</citation>') + len('</citation>')
    citation_string = searchreport_line[start_citation:start_citation + relative_end_citation]
    try:
        treeRoot = ET.fromstring(citation_string)
    except ET.ParseError:
        print('error citation for line: ' + citation_string)
        return citation

    last_category = ''
    for element in treeRoot:
        if element.tag == 'category':
            last_category = element.text
        elif element.tag == 'rel-claims':
            for category in last_category.split(','):
                citation.update({'category_' + category: normalize_claims(element.text)})
        elif element.tag == 'rel-passage':
            for category in last_category.split(','):
                for passage in element:
                    old_rel_passage = citation.get('rel-passage_' + category)
                    if old_rel_passage is None:
                        old_rel_passage = ''
                    citation.update({'rel-passage_' + category: old_rel_passage + passage.text})
        elif element.tag == 'patcit':
            citation.update({'dnum': element.attrib['dnum']})
            citation.update({'url': element.attrib['url']})
            for subelement in element:
                if subelement.tag == 'document-id':
                    for child in subelement:
                        if child.tag == 'country':
                            citation.update({'country': child.text})
                        elif child.tag == 'doc-number':
                            citation.update({'doc-number': child.text})
                        elif child.tag == 'kind':
                            citation.update({'kind': child.text})
                        elif child.tag == 'name':
                            citation.update({'name': child.text})
                        elif child.tag == 'date':
                            citation.update({'date': child.text})
        elif element.tag == 'nplcit':
            citation.update({'nplcit': 'true'})

    # "Kategorie ohne rel-passage" = category without an associated rel-passage
    if last_category != '' and 'rel-passage_' + last_category.split(',')[0] not in citation.keys():
        print('Kategorie ohne rel-passage, Citation ID/String: ' + citation_id + ' / ' + citation_string)
    return citation
def main(file):
    with open(file, 'r', encoding='utf8', errors='ignore') as f:
        lines = f.readlines()
    records = {}
    citations = {}
    for line in lines:
        if '\ten\t' in line:
            application_identifier = line.split('EP\t')[1].split('\ten\t')[0].replace('\t', '')
            application_number = line.split('EP\t')[1].split('\t')[0]
            application_category = line.split('EP\t')[1].split('\t')[1]
            application_date = line.split('EP\t')[1].split('\t')[2]
            if application_date == '':
                print('Skipping entry, because of missing date: ' + application_identifier)
                continue
            if application_identifier not in records:
                records.update({application_identifier: {'application_number': application_number,
                                                         'application_category': application_category,
                                                         'application_date': application_date}})
            record = records.get(application_identifier)
            if '\tTITLE\t' in line:
                record.update({'title': line.split('\tTITLE\t')[1]})
            elif '\tABSTR\t' in line:
                record.update({'abstract': line.split('\tABSTR\t')[1]})
            elif '\tDESCR\t' in line:
                record.update({'description': line.split('\tDESCR\t')[1]})
            elif '\tCLAIM\t' in line:
                record.update({'claims': line.split('\tCLAIM\t')[1]})
            elif '\tAMEND\t' in line:
                record.update({'amended_claims': line.split('\tAMEND\t')[1]})
            elif '\tACSTM\t' in line:
                record.update({'amended_claims_statements': line.split('\tACSTM\t')[1]})
            elif '\tSRPRT\t' in line:
                record.update({'citation_ipcr_classification': extract_classifications(line)})
                record.update({'citation_ids': extract_citationIDs(application_identifier, line)})
                for citation_id in record['citation_ids']:
                    print('evaluate citation id: ' + citation_id)
                    citations.update({citation_id: extract_citation_entry(citation_id,
                                                                          line.split('\tSRPRT\t')[1])})
            elif '\tPDFEP\t' in line:
                record.update({'publication_url': line.split('\tPDFEP\t')[1]})
            records.update({application_identifier: record})
    upload(records, INDEX_APPL, 'patent_eu')
    upload(citations, INDEX_CIT, 'citation_eu')
def createIndexPatentApplications():
    settings = {
        'settings': {
            'number_of_shards': 1,
            'number_of_replicas': 0,
        },
        'mappings': {
            'properties': {
                'application_number': {'type': 'keyword'},
                'application_category': {'type': 'keyword'},
                'application_date': {'type': 'date'},
                'title': {'type': 'text'},
                'abstract': {'type': 'text'},
                'description': {'type': 'text'},
                'claims': {'type': 'text'},
                'amended_claims': {'type': 'text'},
                'amended_claims_statements': {'type': 'text'},
                'citation_ipcr_classification': {'type': 'keyword'},
                'citation_ids': {'type': 'keyword'},
                'publication_url': {'type': 'text'},
            },
        },
    }
    es = Elasticsearch(hosts=['http://172.16.64.23:9200/'])
    response = es.indices.create(index=INDEX_APPL, ignore=400, body=settings)
    print(response)
def createIndexCitations():
    settings = {
        'settings': {
            'number_of_shards': 1,
            'number_of_replicas': 0,
            'index.mapping.ignore_malformed': True,
        },
        'mappings': {
            'properties': {
                'dnum': {'type': 'keyword'},
                'publication_url': {'type': 'text'},
                'country': {'type': 'keyword'},
                'kind': {'type': 'keyword'},
                'doc_number': {'type': 'keyword'},
                'name': {'type': 'text'},
                'date': {'type': 'date'},
                'category_X': {'type': 'integer'},
                'category_P': {'type': 'integer'},
                'category_A': {'type': 'integer'},
                'category_D': {'type': 'integer'},
                'category_Y': {'type': 'integer'},
                'category_L': {'type': 'integer'},
                'category_O': {'type': 'integer'},
                'category_T': {'type': 'integer'},
                'category_E': {'type': 'integer'},
                'rel-passage_X': {'type': 'text'},
                'rel-passage_P': {'type': 'text'},
                'rel-passage_A': {'type': 'text'},
                'rel-passage_D': {'type': 'text'},
                'rel-passage_Y': {'type': 'text'},
                'rel-passage_L': {'type': 'text'},
                'rel-passage_O': {'type': 'text'},
                'rel-passage_T': {'type': 'text'},
                'rel-passage_E': {'type': 'text'},
                'nplcit': {'type': 'boolean'},
            },
        },
    }
    es = Elasticsearch(hosts=['http://172.16.64.23:9200/'])
    response = es.indices.create(index=INDEX_CIT, ignore=400, body=settings)
    print(response)
def upload(records, index, type):
    client = connections.create_connection(hosts=['http://172.16.64.23:9200/'])
    res = helpers.bulk(client, gendata(records, index, type), index=index,
                       chunk_size=1000, request_timeout=200)
    print(res)
def query_exist_claim():
    return {'query': {'bool': {'filter': [{'exists': {'field': 'citation_ids'}},
                                          {'exists': {'field': 'claims'}}]}}}
def query_citation_id(citation_entry):
    return {'query': {'bool': {'filter': [{'exists': {'field': 'category_A'}},
                                          {'ids': {'values': [citation_entry]}}]}}}
def process_hits(es, response, patent_application_id_column, patent_citation_column,
                 application_claim_number_column, application_claim_text_column,
                 related_passages_against_claim_column, category_column):
    print(response)
    all_response_patent_applications = response.get('hits').get('hits')
    for element in all_response_patent_applications:
        patent_application_id = element.get('_id')
        claims_text_raw = element.get('_source').get('claims')
        # The highest claim number is read from the last '<claim id="c-en-00NN">' tag
        max_claim = int(claims_text_raw.split('<claim id="c-en-00')[-1][:2])
        for claim in range(1, max_claim + 1):
            for citation_id in element.get('_source').get('citation_ids'):
                print(citation_id)
                response_citation = es.search(index='ep_patent_citations',
                                              body=query_citation_id(citation_id), size=10000)
                print(response_citation)
                try:
                    response_citation.get('hits').get('hits')[0].get('_source')  # probe: any hit at all?
                except IndexError:
                    continue
                response_rel_claims = response_citation.get('hits').get('hits')[0].get('_source').get('category_A')
                response_rel_passage = response_citation.get('hits').get('hits')[0].get('_source').get('rel-passage_A')
                if claim in response_rel_claims:
                    try:
                        application_claim_text_column.append(
                            claims_text_raw.split('<claim id="c-en-00' + '{:02d}'.format(claim)
                                                  + '" num="00' + '{:02d}'.format(claim) + '">')[1]
                            .split('</claim>')[0])
                    except IndexError:
                        print('Discarded Claim. ID: ' + str(claim)
                              + ', Patent Application ID: ' + str(patent_application_id))
                        continue
                    patent_application_id_column.append(patent_application_id)
                    patent_citation_column.append(citation_id)
                    application_claim_number_column.append(claim)
                    related_passages_against_claim_column.append(response_rel_passage)
                    category_column.append('A')
def main():
    patent_application_id_column = []
    patent_citation_column = []
    application_claim_number_column = []
    application_claim_text_column = []
    related_passages_against_claim_column = []
    category_column = []

    es = Elasticsearch(hosts=['http://172.16.64.23:9200/'])
    response = es.search(index='ep_patent_applications', body=query_exist_claim(), scroll='2m')
    print(response)
    sid = response.get('_scroll_id')
    scroll_size = len(response['hits']['hits'])
    process_hits(es, response, patent_application_id_column, patent_citation_column,
                 application_claim_number_column, application_claim_text_column,
                 related_passages_against_claim_column, category_column)

    while scroll_size > 0:
        # Keep scrolling until the result window is exhausted
        response = es.scroll(scroll_id=sid, scroll='2m')
        process_hits(es, response, patent_application_id_column, patent_citation_column,
                     application_claim_number_column, application_claim_text_column,
                     related_passages_against_claim_column, category_column)
        sid = response['_scroll_id']
        scroll_size = len(response['hits']['hits'])

    column_data = {'patent_application_id': patent_application_id_column,
                   'patent_citation_id': patent_citation_column,
                   'application_claim_number': application_claim_number_column,
                   'application_claim_text': application_claim_text_column,
                   'related_passages_against_claim': related_passages_against_claim_column,
                   'category': category_column}
    print(column_data)
    df = pd.DataFrame(data=column_data,
                      columns=['patent_application_id', 'patent_citation_id',
                               'application_claim_number', 'application_claim_text',
                               'related_passages_against_claim', 'category'])
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
    pd.set_option('display.max_colwidth', 30)
    df.to_csv('./frame_negativeSamples.csv')
def query_exist_claim():
    return {'query': {'bool': {'filter': [{'exists': {'field': 'citation_ids'}},
                                          {'exists': {'field': 'claims'}}]}}}
def query_citation_id(citation_entry):
    return {'query': {'bool': {'filter': [{'exists': {'field': 'category_X'}},
                                          {'ids': {'values': [citation_entry]}}]}}}
def process_hits(es, response, patent_application_id_column, patent_citation_column,
                 application_claim_number_column, application_claim_text_column,
                 related_passages_against_claim_column, category_column):
    # Same logic as the category-A variant above, but collecting 'X' citations
    print(response)
    all_response_patent_applications = response.get('hits').get('hits')
    for element in all_response_patent_applications:
        patent_application_id = element.get('_id')
        claims_text_raw = element.get('_source').get('claims')
        max_claim = int(claims_text_raw.split('<claim id="c-en-00')[-1][:2])
        for claim in range(1, max_claim + 1):
            for citation_id in element.get('_source').get('citation_ids'):
                print(citation_id)
                response_citation = es.search(index='ep_patent_citations',
                                              body=query_citation_id(citation_id), size=10000)
                print(response_citation)
                try:
                    response_citation.get('hits').get('hits')[0].get('_source')  # probe: any hit at all?
                except IndexError:
                    continue
                response_rel_claims = response_citation.get('hits').get('hits')[0].get('_source').get('category_X')
                response_rel_passage = response_citation.get('hits').get('hits')[0].get('_source').get('rel-passage_X')
                if claim in response_rel_claims:
                    try:
                        application_claim_text_column.append(
                            claims_text_raw.split('<claim id="c-en-00' + '{:02d}'.format(claim)
                                                  + '" num="00' + '{:02d}'.format(claim) + '">')[1]
                            .split('</claim>')[0])
                    except IndexError:
                        print('Discarded Claim. ID: ' + str(claim)
                              + ', Patent Application ID: ' + str(patent_application_id))
                        continue
                    patent_application_id_column.append(patent_application_id)
                    patent_citation_column.append(citation_id)
                    application_claim_number_column.append(claim)
                    related_passages_against_claim_column.append(response_rel_passage)
                    category_column.append('X')
def main():
    patent_application_id_column = []
    patent_citation_column = []
    application_claim_number_column = []
    application_claim_text_column = []
    related_passages_against_claim_column = []
    category_column = []

    es = Elasticsearch(hosts=['http://172.16.64.23:9200/'])
    response = es.search(index='ep_patent_applications', body=query_exist_claim(), scroll='2m')
    print(response)
    sid = response.get('_scroll_id')
    scroll_size = len(response['hits']['hits'])
    process_hits(es, response, patent_application_id_column, patent_citation_column,
                 application_claim_number_column, application_claim_text_column,
                 related_passages_against_claim_column, category_column)

    while scroll_size > 0:
        # Keep scrolling until the result window is exhausted
        response = es.scroll(scroll_id=sid, scroll='2m')
        process_hits(es, response, patent_application_id_column, patent_citation_column,
                     application_claim_number_column, application_claim_text_column,
                     related_passages_against_claim_column, category_column)
        sid = response['_scroll_id']
        scroll_size = len(response['hits']['hits'])

    column_data = {'patent_application_id': patent_application_id_column,
                   'patent_citation_id': patent_citation_column,
                   'application_claim_number': application_claim_number_column,
                   'application_claim_text': application_claim_text_column,
                   'related_passages_against_claim': related_passages_against_claim_column,
                   'category': category_column}
    print(column_data)
    df = pd.DataFrame(data=column_data,
                      columns=['patent_application_id', 'patent_citation_id',
                               'application_claim_number', 'application_claim_text',
                               'related_passages_against_claim', 'category'])
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
    pd.set_option('display.max_colwidth', 30)
    df.to_csv('./frame.csv')
def desirable(tag):
    # Keep tokens that are 'paragraph', '-', '[' or purely numeric cardinal numbers
    return tag[0] in ['paragraph', '-', '['] or (tag[1] in ['CD'] and tag[0].isdigit())
def syntax_right(tag_before_tag, tag):
    # A number only counts if it directly follows 'paragraph(s)' or an opening bracket
    if tag[1] != 'CD':
        return True
    else:
        return (tag[1] == 'CD' and 'paragraph' in tag_before_tag[0]) or '[' in tag_before_tag[0]
def text_is_range(tag_before_tag, tag, tag_after_tag):
    # Matches patterns like '3 - 5'
    return tag_before_tag[1] == 'CD' and tag[0] == '-' and tag_after_tag[1] == 'CD'
def extract_paragraphs(text):
    """ Extract the set of paragraph numbers referenced in a passage text. """
    tokens = nltk.word_tokenize(text.lower().replace('paragraphs', 'paragraph'))
    pos_tags = nltk.pos_tag(tokens)
    # 1. keep only paragraph markers, brackets, dashes and numbers
    pos_tags = [tag for tag in pos_tags if desirable(tag)]
    # 2. keep numbers only when they follow 'paragraph' or '['
    pos_tags = [tag for tag_before_tag, tag in zip([('', '')] + pos_tags[:-1], pos_tags)
                if syntax_right(tag_before_tag, tag)]
    # 3. drop the markers themselves, keeping numbers and dashes
    pos_tags = [tag for tag in pos_tags if 'paragraph' not in tag[0] and '[' not in tag[0]]
    # 4. expand 'N - M' patterns into full ranges
    pos_tags_ranges = [list(range(int(tag_before_tag[0]), int(tag_after_tag[0]) + 1))
                       for tag_before_tag, tag, tag_after_tag
                       in zip([('', '')] + pos_tags[:-1], pos_tags, pos_tags[1:] + [('', '')])
                       if text_is_range(tag_before_tag, tag, tag_after_tag)]
    pos_tags_numbers_only = [int(tag[0]) for tag in pos_tags if tag[0] != '-']
    end_result_paragraph_numbers = pos_tags_numbers_only
    for range_list in pos_tags_ranges:
        end_result_paragraph_numbers = end_result_paragraph_numbers + range_list
    end_result_paragraph_numbers = list(set(end_result_paragraph_numbers))
    return end_result_paragraph_numbers
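# Illustrative call (assumes the NLTK 'punkt' tokenizer and POS-tagger data are
# installed; exact output depends on how the tagger labels the tokens):
#   extract_paragraphs('paragraphs [0001] - [0003]')  # -> [1, 2, 3]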
def getAccessToken():
    # OAuth2 client-credentials flow against the EPO OPS token endpoint
    payload = 'grant_type=client_credentials'
    usrPass = consumer_key + ':' + consumer_secret_key
    b64Val = base64.b64encode(bytes(usrPass, 'utf-8'))
    header = {'authorization': 'Basic %s' % b64Val.decode('utf-8'),
              'content-type': 'application/x-www-form-urlencoded'}
    request_token = requests.post(token_url, headers=header, data=payload)
    access_token = request_token.json()['access_token']
    return access_token
def getEquivalents(number):
    access_token = getAccessToken()
    equivalent = []
    payload = number
    header = {'authorization': 'Bearer %s' % access_token,
              'content-type': 'text/plain'}
    request_equivalent = requests.post(request_url, headers=header, data=payload)
    response = request_equivalent.text
    try:
        root = ET.fromstring(response)
        for inquiry_result in list(root.iter('{http://ops.epo.org}equivalents-inquiry'))[0] \
                .iter('{http://ops.epo.org}inquiry-result'):
            for publication_reference in inquiry_result.iter('{http://www.epo.org/exchange}publication-reference'):
                for document_id in publication_reference.iter('{http://www.epo.org/exchange}document-id'):
                    for doc_number in document_id.iter('{http://www.epo.org/exchange}doc-number'):
                        equivalent.append(doc_number.text)
        print(equivalent)
    except (ET.ParseError, IndexError):
        print('unexpected response or no equivalents :' + number)
    return equivalent
def query_patent_citation_country_docNumber(id):
    return {'query': {'bool': {'filter': [{'ids': {'values': [id]}}]}}}
def elasticSearch_process(id):
    response_citation = es.search(index='ep_patent_citations',
                                  body=query_patent_citation_country_docNumber(id), size=10000)
    try:
        country = response_citation.get('hits').get('hits')[0].get('_source').get('country')
        docNumber = response_citation.get('hits').get('hits')[0].get('_source').get('doc-number')
        print(country + docNumber)
        return country + docNumber
    except (IndexError, TypeError):
        return 'error es_response'
def getPatentCitationIds(csv_path):
    list_of_patent_citation_ids = []
    list_of_equivalents_lists = []
    dataframe = pd.read_csv(csv_path, header=0, skiprows=range(1, 2767211))
    patent_citation_id_iterator = dataframe['patent_citation_id']
    for id in patent_citation_id_iterator.unique():
        list_of_patent_citation_ids.append(id)
        citation_identifier = elasticSearch_process(id)
        if citation_identifier != 'error es_response':  # value comparison, not identity
            equivalents_list = getEquivalents(citation_identifier)
        else:
            equivalents_list = ['error es_response']
        list_of_equivalents_lists.append(equivalents_list)
        time.sleep(6.0)  # throttle requests to the OPS API
    return pd.DataFrame({'patent_citation_id': list_of_patent_citation_ids,
                         'equivalents': list_of_equivalents_lists})
def process_csv(path):
    global counter_error
    global counter_success
    with open(path) as f:
        lines = f.readlines()
    follow_up_next_line = False
    current_id = ''
    for line in lines:
        if follow_up_next_line is True:
            equivalents_list = line.replace('[', '').replace(']', '').replace("'", '').rstrip().split(', ')
            equivalents_list.append(current_id)
            equivalents_list = list(dict.fromkeys(equivalents_list))  # dedupe, keep order
            column_id.append(current_id)
            column_equivalents.append(equivalents_list)
            follow_up_next_line = False
            counter_success = counter_success + 1
        elif 'unexpected response or no equivalents :' in line:
            counter_error = counter_error + 1
        else:
            current_id = line.rstrip()
            follow_up_next_line = True
def elasticsearch_request_getDnum(citation_id):
    return {'query': {'bool': {'filter': [{'ids': {'values': [citation_id]}}]}}}
def elasticsearch_request_getParagraphText(application_number, application_category):
    return {'query': {'bool': {'filter': [{'term': {'application_number': application_number}},
                                          {'term': {'application_category': application_category}}]}}}
def getPatentDetails(citation_id):
    response = es.search(index='ep_patent_citations', body=elasticsearch_request_getDnum(citation_id))
    print(response)
    try:
        dnum = response['hits']['hits'][0]['_source']['dnum']
        docNumber = response['hits']['hits'][0]['_source']['doc-number']
        patentCountry = response['hits']['hits'][0]['_source']['country']
        patentCategory = response['hits']['hits'][0]['_source']['kind']
    except (IndexError, KeyError):
        return 'not found'
    return dnum, docNumber, patentCountry, patentCategory
def dataframeToDict(dataframe, dictionary):
    for index, entry in dataframe.iterrows():
        # 'equivalent_patents' holds a stringified Python list, e.g. "['EP123', 'US456']"
        id_list = entry['equivalent_patents'].strip('][').split(', ')
        clean_id_list = []
        for value in id_list:
            clean_id_list.append(value.replace("'", ''))
        dictionary[entry['patent_id']] = clean_id_list
    return dictionary
def getParagraphText(dnum, application_category, paragraphs):
    response = es.search(index='ep_patent_applications',
                         body=elasticsearch_request_getParagraphText(dnum, application_category))
    try:
        paragraph_field = response['hits']['hits'][0]['_source']['description']
    except (IndexError, KeyError):
        return 'not found'
    extracted_paragraph = ''
    for paragraph in paragraphs:
        found_paragraph_position_start = paragraph_field.find(
            '<p id="p' + '%04d' % int(paragraph) + '" num="' + '%04d' % int(paragraph) + '">')
        # +3 keeps '</p' without the closing '>'; the leftover is stripped later in preprocessing
        found_paragraph_position_end = paragraph_field.find('</p>', found_paragraph_position_start) + 3
        extracted_paragraph = extracted_paragraph + ' ' \
            + paragraph_field[found_paragraph_position_start:found_paragraph_position_end]
    return extracted_paragraph
def getParagraphFromText(paragraphsText, paragraphNumber):
    found_paragraph_position_start = paragraphsText.find(
        '<p id="p' + '%04d' % int(paragraphNumber) + '" num="' + '%04d' % int(paragraphNumber) + '">')
    found_paragraph_position_end = paragraphsText.find('</p', found_paragraph_position_start) + 3
    extracted_paragraph = paragraphsText[found_paragraph_position_start:found_paragraph_position_end]
    return extracted_paragraph
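# Illustrative call (hypothetical input, mirrors the EPO '<p id="pNNNN">' markup):
#   getParagraphFromText('<p id="p0001" num="0001">Hello</p>', 1)
#   -> '<p id="p0001" num="0001">Hello</p'
#   (the trailing '>' is cut off; the leftover '</p' is removed later in preprocessing)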
def execute():
    path = '/mnt/data/datasets/patents/patent_matching'
    dtypes = {'application_claim_text': str, 'patent_searchReport_paragraph': str}
    positives = pd.read_csv(path + '/positives_satellite.csv', header=0, dtype=dtypes)
    negatives = pd.read_csv(path + '/negatives_satellite.csv', header=0, dtype=dtypes)
    sample_size = 1.0
    rename = {'application_claim_text': 'text', 'patent_searchReport_paragraph': 'text_b'}
    positives = positives[['application_claim_text', 'patent_searchReport_paragraph']]
    positives['label'] = '1'
    positives = positives.rename(columns=rename)
    negatives = negatives[['application_claim_text', 'patent_searchReport_paragraph']]
    negatives['label'] = '0'
    negatives = negatives.rename(columns=rename)
    # DataFrame.append was removed in pandas 2.0; concat is the supported API.
    allSamples = pd.concat([positives, negatives]).dropna()
    # Strip XML remnants and normalise whitespace in both text columns.
    for col in ('text', 'text_b'):
        allSamples[col] = (allSamples[col]
                           .str.replace('<\\/p', '', regex=True)
                           .str.replace('\\<.+?\\>', '', regex=True)
                           .str.replace('--\\>', '', regex=True)
                           .str.replace('"', '', regex=True)
                           .str.replace('[^A-Za-z0-9\\s.]+', '', regex=True)
                           .replace('^\\s', '', regex=True)
                           .replace('\\B\\s+|\\s+\\B', '', regex=True)
                           .replace('^[\\s]*$', np.nan, regex=True))
    allSamples = allSamples.sort_values(by=['text']).dropna()
    (train, test_dev) = train_test_split(allSamples, test_size=0.2, shuffle=False)
    (test, dev) = train_test_split(test_dev, test_size=0.5, shuffle=False)
    train = train.sample(frac=sample_size)
    test = test.sample(frac=sample_size)
    dev = dev.sample(frac=sample_size)
    print('Check for intersection values:')
    print('Train in Test')
    print(train['text'].isin(test['text']).value_counts())
    print('Train in Dev')
    print(train['text'].isin(dev['text']).value_counts())
    print('Test in Dev')
    print(test['text'].isin(dev['text']).value_counts())
    train.to_csv(path + '/train.tsv', sep='\t', index=False)
    test.to_csv(path + '/test.tsv', sep='\t', index=False)
    dev.to_csv(path + '/dev.tsv', sep='\t', index=False)
def query_citation_id(citation_entry): return {'query': {'ids': {'values': [citation_entry]}}}
def process_hits(response, column_id_pa, column_cit_srprt, column_category_P, column_category_A, column_category_D, column_category_Y, column_category_L, column_category_O, column_category_T, column_category_E, column_category_X):
    category_columns = {'P': column_category_P, 'A': column_category_A, 'D': column_category_D, 'Y': column_category_Y, 'L': column_category_L, 'O': column_category_O, 'T': column_category_T, 'E': column_category_E, 'X': column_category_X}
    all_response_patent_applications = response.get('hits').get('hits')
    for element in all_response_patent_applications:
        element_id_pa = element.get('_id')
        for citation_id in element.get('_source').get('citation_ids'):
            column_id_pa.append(element_id_pa)
            column_cit_srprt.append(citation_id)
            response_citation = es.search(index='ep_patent_citations', body=query_citation_id(citation_id), size=10000, filter_path=['hits.total.value', 'hits.hits'])
            response_citation_entry = response_citation.get('hits').get('hits')[0].get('_source')
            # Record, per search-report category, whether the citation carries it.
            for (category, column) in category_columns.items():
                column.append(response_citation_entry.get('category_' + category) is not None)
def read_file(path):
    with open(path, 'r') as f:
        return json.load(f)
def get_results(results, dataset, name): if (dataset not in datasets_mt_few_shot): res = {k: round((v['acc'] * 100), 1) for (k, v) in results.items()} else: res = {k.replace(name, 'few-shot'): round((v['acc'] * 100), 1) for (k, v) in results.items()} res = dict(sorted(res.items())) task_names = list(res.keys()) results_dataset = defaultdict(dict) dataset_names = set([task[:(- 3)] for task in task_names]) for dataset_name in dataset_names: results_dataset[dataset_name] = defaultdict(dict) for task in task_names: dataset_name = task[:(- 3)] lang = task[(- 2):] results_dataset[dataset_name][lang] = res[task] for (dataset_name, resu) in results_dataset.items(): values = [v for (k, v) in resu.items() if (k != 'en')] results_dataset[dataset_name]['avg'] = round((sum(values) / len(values)), 1) for (resource, langs) in languages.items(): values = [v for (k, v) in resu.items() if (k in langs)] if (len(values) > 0): results_dataset[dataset_name][resource] = round((sum(values) / len(values)), 1) return results_dataset
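A hedged sketch of the input shape get_results expects: task keys end in a two-letter language suffix ("_sw"), and the module-level languages and datasets_mt_few_shot globals (defined here only as assumptions) drive the per-resource averages:

datasets_mt_few_shot = set()                      # assumption: no few-shot MT datasets
languages = {'high': ['de'], 'low': ['sw']}       # assumed resource grouping

fake = {'xcopa_sw': {'acc': 0.561}, 'xcopa_de': {'acc': 0.702}, 'xcopa_en': {'acc': 0.843}}
print(get_results(fake, 'xcopa', name='dummy'))
# per-dataset dict roughly like:
# {'xcopa': {'de': 70.2, 'en': 84.3, 'sw': 56.1, 'avg': 63.2, 'high': 70.2, 'low': 56.1}}
# ('avg' is the mean over non-English languages)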
def get_all_results(models, datasets): all_results = defaultdict(dict) for (model, names) in models.items(): for dataset in datasets: for name in names: shots = (8 if ('mgsm' in dataset) else 0) if (not os.path.exists(f'../results/{model}/{name}/{name}_{dataset}_{shots}-shot.json')): print(f'../results/{model}/{name}/{name}_{dataset}_{shots}-shot.json') continue output = read_file(f'../results/{model}/{name}/{name}_{dataset}_{shots}-shot.json') results = get_results(output['results'], dataset, name) for dataset_name in results: if (dataset_name not in all_results): all_results[dataset_name] = defaultdict(dict) all_results[dataset_name][name].update(results[dataset_name]) return all_results
def get_dataframes(all_results, datasets):
    results_avg = pd.DataFrame()
    for dataset in datasets:
        results = pd.DataFrame(all_results[dataset]).T
        results['dataset'] = dataset
        results['model'] = [models_reverse[model] for model in results.index]
        results['size'] = model_sizes_all[:len(results)]
        results_avg['model'] = results['model']
        results_avg['size'] = results['size']
        results_avg[dataset] = results['avg']
        yield results
    size = results_avg['size']
    results_avg = results_avg.drop(columns=['size'])
    # Average only the per-dataset score columns; the string-typed 'model'
    # column makes a plain mean(axis=1) fail on recent pandas.
    results_avg['avg'] = results_avg[list(datasets)].mean(axis=1).round(1)
    results_avg['dataset'] = 'avg'
    results_avg['size'] = size
    yield results_avg
def plot_size_df_models(df, langs=False): df.set_index('size', inplace=True) df.groupby('model')['avg'].plot(x='size', y='acc', title=list(df['dataset'])[0], legend=True, marker='o') plt.xscale('log') plt.xticks(model_sizes_all, model_sizes_all, rotation='vertical') plt.show() if langs: for lang in df.columns: if (lang in ['dataset', 'model', 'avg', 'size']): continue df.groupby('model')[lang].plot(x='size', title=f"{list(df['dataset'])[0]}_{lang}", legend=True, marker='o') plt.xscale('log') plt.xticks(model_sizes_all, model_sizes_all, rotation='vertical') plt.show()
def get_dataframes_model(all_results, datasets, model_name, divide=False):
    dataset_keys = list(all_results.keys())
    if divide:
        df_avg_self = pd.DataFrame()
        df_avg_mt = pd.DataFrame()
    else:
        df_avg = {}
        for average in (['avg'] + list(languages.keys())):
            df_avg[average] = pd.DataFrame()
    for dataset in datasets:
        dfs = []
        for dataset_key in dataset_keys:
            if dataset_key.startswith(dataset):
                if model_name == 'open_llama_v2':
                    if not any((model.lower() in models['open_llama_v2']) for model in all_results[dataset_key]):
                        continue
                elif not any((model_name in model.lower()) for model in all_results[dataset_key]):
                    continue
                if ('600M' in dataset_key) or ('1.3B' in dataset_key):
                    continue
                results = pd.DataFrame(all_results[dataset_key]).T
                results['dataset'] = dataset_key
                results['model'] = [models_reverse[model] for model in results.index]
                results = results[results['model'] == model_name]
                results.drop(columns=['model'], inplace=True)
                results['model'] = model_name
                results['size'] = model_sizes[model_name][:len(results)]
                dfs.append(results)
        df_concat = pd.concat(dfs)
        df_concat = df_concat.sort_values(['size', 'dataset'])
        df_concat = df_concat.reindex(columns=(['model', 'size', 'dataset'] + [col for col in df_concat.columns if col not in ['model', 'size', 'dataset']]))
        df_concat['dataset'] = df_concat['dataset'].str.replace(dataset, 'Direct')
        df_concat['dataset'] = df_concat['dataset'].str.replace('Direct-mt_few-shot', 'Self-translate')
        df_concat['dataset'] = df_concat['dataset'].str.replace('Direct-mt_nllb-200-3.3B', 'MT (NLLB)')
        if divide:
            # Filter on the renamed labels; the previous 'Self-MT'/'MT' values
            # never exist after the replacements above, so the old filters
            # silently dropped those rows.
            df_concat_self = df_concat[df_concat['dataset'].isin(['Direct', 'Self-translate'])]
            df_concat_mt = df_concat[df_concat['dataset'].isin(['Self-translate', 'MT (NLLB)'])]
            df_avg_self['model'] = df_concat_self['model']
            df_avg_self['size'] = df_concat_self['size']
            df_avg_self['dataset'] = df_concat_self['dataset']
            df_avg_self[dataset] = df_concat_self['avg']
            df_avg_mt['model'] = df_concat_mt['model']
            df_avg_mt['size'] = df_concat_mt['size']
            df_avg_mt['dataset'] = df_concat_mt['dataset']
            df_avg_mt[dataset] = df_concat_mt['avg']
            yield df_concat_self
            yield df_concat_mt
        else:
            for average in (['avg'] + list(languages.keys())):
                df_avg[average]['model'] = df_concat['model']
                df_avg[average]['size'] = df_concat['size']
                df_avg[average]['dataset'] = df_concat['dataset']
                df_avg[average][dataset] = df_concat[average]
            yield df_concat
    if divide:
        # Keep each frame's own size column; reusing one Series across both
        # frames would misalign on their differing indices.
        size_self = df_avg_self['size']
        size_mt = df_avg_mt['size']
        df_avg_self = df_avg_self.drop(columns=['size'])
        df_avg_mt = df_avg_mt.drop(columns=['size'])
        # Average only the numeric per-dataset columns (see get_dataframes).
        df_avg_self['avg'] = df_avg_self[list(datasets)].mean(axis=1).round(1)
        df_avg_mt['avg'] = df_avg_mt[list(datasets)].mean(axis=1).round(1)
        df_avg_self['size'] = size_self
        df_avg_mt['size'] = size_mt
        yield df_avg_self
        yield df_avg_mt
    else:
        for average in (['avg'] + list(languages.keys())):
            size = df_avg[average]['size']
            df_avg[average] = df_avg[average].drop(columns=['size'])
            df_avg[average][average] = df_avg[average][list(datasets)].mean(axis=1).round(1)
            df_avg[average]['size'] = size
            yield df_avg[average]
def plot_size_df_datasets(df, model_name, title, langs=False): titles = {'low': 'Low-resource languages', 'high': 'High-resource languages', 'avg': 'Average'} df.set_index('size', inplace=True) for average in (['avg'] + list(languages.keys())): if (average not in df.columns): continue df.groupby('dataset')[average].plot(x='size', y='acc', title=f'{title} {titles[average]}', ylabel='Average accuracy', xlabel='Model size (B)', legend=True, marker='o') plt.xscale('log') plt.xticks(model_sizes[model_name], model_sizes[model_name], rotation='vertical') if (title == ''): plt.ylim(46, 62) plt.savefig(f'plots/{model_name}_{average}.pdf', bbox_inches='tight') plt.show() if langs: for lang in df.columns: if (lang in ['dataset', 'avg', 'size']): continue df.groupby('dataset')[lang].plot(x='size', y='acc', title=f'{title}_{lang}', ylabel='Accuracy', xlabel='Model size (B)', legend=True, marker='o') plt.xscale('log') plt.xticks(model_sizes[model_name], model_sizes[model_name], rotation='vertical') plt.show()
def get_metrics(): metrics_dict = defaultdict(dict) for dataset_name in _DATASETS: for model_name in _MODELS: if ((model_name == 'bloom-560m') and (dataset_name == 'xnli')): with open(f'metrics/{dataset_name}/bloom-1b1.json') as f: metrics_dict[dataset_name][model_name] = json.load(f) else: with open(f'metrics/{dataset_name}/{model_name}.json') as f: metrics_dict[dataset_name][model_name] = json.load(f) for language in metrics_dict[dataset_name][model_name]: avg = defaultdict(float) for field in metrics_dict[dataset_name][model_name][language]: for (metric, value) in metrics_dict[dataset_name][model_name][language][field].items(): avg[metric] += value for metric in avg: avg[metric] /= len(metrics_dict[dataset_name][model_name][language]) avg[metric] = round(avg[metric], 2) metrics_dict[dataset_name][model_name][language]['avg'] = dict(avg) return dict(metrics_dict)
def add_avg(metrics_dict):
    metrics_dict_split = defaultdict(dict)
    for metric in ['sacrebleu', 'chrf++', 'comet']:
        metrics_dict_split[metric] = deepcopy(metrics_dict)
        for dataset_name in metrics_dict:
            for model_name in metrics_dict[dataset_name]:
                for (language, language_dict) in metrics_dict[dataset_name][model_name].items():
                    avg = language_dict['avg']
                    metrics_dict_split[metric][dataset_name][model_name][language] = avg.get(metric, 0)
                items = metrics_dict_split[metric][dataset_name][model_name]
                # Average over the language entries only; the earlier double
                # assignment included the freshly written 'avg' key in its own
                # mean, biasing the result.
                values = [v for (k, v) in items.items() if k != 'avg']
                metrics_dict_split[metric][dataset_name][model_name]['avg'] = round(sum(values) / len(values), 1)
                for (resource, langs) in languages.items():
                    values = [v for (k, v) in items.items() if k in langs]
                    if len(values) > 0:
                        metrics_dict_split[metric][dataset_name][model_name][resource] = round(sum(values) / len(values), 1)
    return dict(metrics_dict_split)
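A hedged shape sketch: a made-up metrics_dict matching what get_metrics builds (per language, per-field scores plus an 'avg' field), fed through add_avg; the languages grouping is an assumption standing in for the module-level global:

languages = {'low': ['sw']}   # assumed resource grouping
metrics_dict = {'xcopa': {'xglm-7.5B': {'sw': {
    'premise': {'sacrebleu': 12.3, 'chrf++': 34.5, 'comet': 67.8},
    'avg': {'sacrebleu': 12.3, 'chrf++': 34.5, 'comet': 67.8},
}}}}
split = add_avg(metrics_dict)
print(split['comet']['xcopa']['xglm-7.5B'])
# {'sw': 67.8, 'avg': 67.8, 'low': 67.8}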
def plot_size_df_datasets(df, model_name, title, langs=False): df.set_index('size', inplace=True) df_model = df[(df['model'] == model_name)] for average in (['avg'] + list(languages.keys())): if (average not in df.columns): continue df_model[average].plot(x='size', y='acc', title=(f'{title} {average}' if title else ''), ylabel='Average COMET', xlabel='Model size (B)', legend=True, marker='o', label='Self-translate', color='C2') plt.axhline(y=df.loc[3.3][average], color='C1', linestyle='--', label='MT (NLLB)') plt.legend() plt.xscale('log') plt.ylim(55, 90) plt.xticks(model_sizes[model_name], model_sizes[model_name], rotation='vertical') if (title == ''): plt.savefig(f'plots/{average}.pdf', bbox_inches='tight') plt.show() if langs: for lang in df.columns: if (lang in ['dataset', 'avg', 'size']): continue df[lang].plot(x='size', y='acc', title=f'{title}_{lang}', ylabel='BLEU', xlabel='Model size (B)', legend=True, marker='o') plt.xscale('log') plt.xticks(model_sizes[model_name], model_sizes[model_name], rotation='vertical') plt.show()
def get_dataframes_model(metrics_dict_split, model_name): for metric in ['comet']: df_avg = {} for average in (['avg'] + list(languages.keys())): df_avg[average] = pd.DataFrame({'model': _MODELS}, index=_MODELS) for dataset_name in metrics_dict_split[metric]: df = pd.DataFrame(metrics_dict_split[metric][dataset_name]).T for average in (['avg'] + list(languages.keys())): df_avg[average][dataset_name] = df[average] df['model'] = model_names_all df['size'] = model_sizes_all df = df.reindex(columns=(['model', 'size'] + [col for col in df.columns if (col not in ['model', 'size'])])) display(df) print(df.to_latex(index=False)) plot_size_df_datasets(df, model_name, f'{dataset_name} {metric}') for average in (['avg'] + list(languages.keys())): df_avg[average][average] = df_avg[average].mean(axis=1).round(1) df_avg[average]['model'] = model_names_all df_avg[average]['size'] = model_sizes_all df_avg[average] = df_avg[average].reindex(columns=(['model', 'size'] + [col for col in df_avg[average].columns if (col not in ['model', 'size'])])) display(df_avg[average]) print(df_avg[average].to_latex(index=False)) plot_size_df_datasets(df_avg[average], model_name, title='')
def get_dataset(dataset_args: Dict[(str, str)]) -> DatasetDict: '\n Loads the dataset using the dataset_args.\n\n Args:\n - dataset_args (dict): A dictionary containing the dataset name, split, and configurations.\n\n Returns:\n - dataset (DatasetDict): A dictionary containing the dataset.\n ' dataset = DatasetDict() if (dataset_args['dataset'] == 'xcopa'): dataset['en'] = load_dataset('super_glue', 'copa', split=dataset_args['dataset_split']) else: dataset['en'] = load_dataset(dataset_args['dataset'], 'en', split=dataset_args['dataset_split']) for config in dataset_args['dataset_configs']: dataset[config] = load_dataset(dataset_args['dataset'], config, split=dataset_args['dataset_split']) return dataset
def get_dataset_mt(dataset_args: Dict[(str, str)], model: str) -> DatasetDict: '\n Loads the machine translation dataset using the dataset_args and model.\n\n Args:\n - dataset_args (dict): A dictionary containing the dataset name, split, and configurations.\n - model (str): The name of the model.\n\n Returns:\n - dataset (DatasetDict): A dictionary containing the machine translation dataset.\n ' dataset = DatasetDict() for config in dataset_args['dataset_configs']: dataset[config] = load_dataset(dataset_args['dataset_mt'], model, split=config) return dataset
def get_texts(dataset: DatasetDict, dataset_args: Dict[(str, str)]) -> DefaultDict[(str, Dict[(str, List[str])])]: '\n Extracts the texts from the dataset.\n\n Args:\n - dataset (DatasetDict): A dictionary containing the dataset.\n - dataset_args (dict): A dictionary containing the dataset name, split, and configurations.\n\n Returns:\n - texts (defaultdict): A dictionary containing the texts for each configuration and field.\n ' texts = defaultdict(dict) for config in dataset: for field in dataset_args['dataset_fields']: texts[config][field] = dataset[config][field] return texts
def load_comet(model_name: str='Unbabel/wmt22-comet-da'): '\n Loads the COMET model from a checkpoint.\n\n Args:\n - model_name (str): The name of the COMET model.\n\n Returns:\n - model: The loaded COMET model.\n ' model_path = download_model(model_name) model = load_from_checkpoint(model_path) return model
@find_executable_batch_size(starting_batch_size=2048) def compute_comet(batch_size: int, model: load_from_checkpoint, predictions: List[str], references: List[str], sources: List[str], gpus: Optional[int]=None, progress_bar: bool=False) -> Dict[(str, float)]: '\n Computes the COMET score for a batch of translations.\n\n Args:\n - batch_size (int): The batch size for the COMET model.\n - model (load_from_checkpoint): The loaded COMET model.\n - predictions (List[str]): A list of translated sentences.\n - references (List[str]): A list of reference sentences.\n - sources (List[str]): A list of source sentences.\n - gpus (int): The number of GPUs to use for computation.\n - progress_bar (bool): Whether to display a progress bar during computation.\n\n Returns:\n - model_output (Dict[str, float]): A dictionary containing the COMET score for each sentence.\n ' if (gpus is None): gpus = (1 if torch.cuda.is_available() else 0) data = {'src': sources, 'mt': predictions, 'ref': references} data = [dict(zip(data, t)) for t in zip(*data.values())] model_output = model.predict(data, batch_size=batch_size, gpus=gpus, progress_bar=progress_bar) return model_output
def evaluate_translations(predictions: List[str], references: List[str], sources: List[str]) -> Dict[(str, float)]: '\n Evaluates the translations using sacrebleu, chrf and comet metrics.\n\n Args:\n - predictions (List[str]): A list of predicted translations.\n - references (List[str]): A list of reference translations.\n - sources (List[str]): A list of source sentences.\n\n Returns:\n - result_dictionary (Dict[str, float]): A dictionary containing the evaluation results for each metric.\n ' print('Loading sacrebleu...') sacrebleu = evaluate.load('sacrebleu') print('Loading chrf...') chrf = evaluate.load('chrf') print('Loading comet...') model = load_comet('Unbabel/wmt22-comet-da') result_dictionary = {} print(f'Computing sacrebleu') sacrebleu_results = sacrebleu.compute(predictions=predictions, references=references) result_dictionary['sacrebleu'] = round(sacrebleu_results['score'], 2) print(f'Computing chrf score') chrf_results = chrf.compute(predictions=predictions, references=references, word_order=2) result_dictionary['chrf++'] = round(chrf_results['score'], 2) print('Computing comet score') comet_results = compute_comet(model=model, predictions=predictions, references=references, sources=sources, progress_bar=True) comet_results['mean_score'] = (sum(comet_results['scores']) / len(comet_results['scores'])) result_dictionary['comet'] = round((comet_results['mean_score'] * 100), 2) print(result_dictionary) return result_dictionary
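An illustrative call only: evaluate_translations downloads sacrebleu, chrF and the wmt22-comet-da checkpoint on first use, so this is a sketch rather than a fast test; the sentences are made up:

preds = ['The cat sits on the mat.']
refs = ['The cat is sitting on the mat.']
srcs = ['Die Katze sitzt auf der Matte.']
scores = evaluate_translations(predictions=preds, references=refs, sources=srcs)
# -> {'sacrebleu': ..., 'chrf++': ..., 'comet': ...}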
def evaluate_texts(predictions: DefaultDict[(str, Dict[(str, List[str])])], references: DefaultDict[(str, Dict[(str, List[str])])], dataset_args: Dict[(str, str)], model_name: str) -> None: '\n Evaluates the translations for each configuration and field.\n\n Args:\n - predictions (defaultdict): A dictionary containing the predicted translations for each configuration and field.\n - references (defaultdict): A dictionary containing the reference translations for each configuration and field.\n - dataset_args (dict): A dictionary containing the dataset name, split, and configurations.\n - model_name (str): The name of the model.\n ' evaluations = {} for config in predictions: evaluations[config] = {} print(f'Evaluating config {config}') for field in dataset_args['dataset_fields']: print(f'Evaluating field {field}') evaluations[config][field] = evaluate_translations(predictions=predictions[config][field], references=references['en'][field], sources=references[config][field]) save_file(evaluations, dataset_args, model_name)
def save_file(evaluations: Dict[(str, Dict[(str, Dict[(str, float)])])], dataset_args: Dict[(str, str)], model_name: str) -> None: '\n Saves the evaluation results to a file.\n\n Args:\n - evaluations (dict): A dictionary containing the evaluation results for each configuration and field.\n - dataset_args (dict): A dictionary containing the dataset name, split, and configurations.\n - model_name (str): The name of the model.\n ' dirname = f"metrics/{dataset_args['dataset'].split('/')[(- 1)]}" if (not os.path.exists(dirname)): os.makedirs(dirname) filename = f'{dirname}/{model_name}.json' with open(filename, 'w', encoding='utf-8') as file: json.dump(evaluations, file, indent=2)
def main() -> None: '\n Main function that evaluates the translations for each dataset and model.\n ' for dataset_name in _DATASETS: dataset_args = dataset_configs[dataset_name] print('Evaluating dataset', dataset_name) dataset = get_dataset(dataset_args) references = get_texts(dataset, dataset_args) for model_name in _MODELS: if ((model_name == 'bloom-560m') and (dataset_name == 'xnli')): continue print('Evaluating model', model_name) dataset_mt = get_dataset_mt(dataset_args, model_name) predictions = get_texts(dataset_mt, dataset_args) evaluate_texts(predictions, references, dataset_args, model_name)
def count_lines(input_list: List[str]) -> int: '\n Counts the number of lines in a list of strings.\n\n Args:\n input_list (List[str]): List of strings.\n\n Returns:\n int: Number of lines in the list.\n ' return len(input_list)
class DatasetReader(IterableDataset): def __init__(self, sentences: List[str], tokenizer, max_length: int=128): '\n Initializes the DatasetReader class.\n\n Args:\n sentences (List[str]): List of sentences.\n tokenizer: Tokenizer object.\n max_length (int, optional): Maximum length of the tokenized sentence. Defaults to 128.\n ' self.sentences = sentences self.tokenizer = tokenizer self.max_length = max_length self.current_line = 0 self.total_lines = count_lines(sentences) print(f'{self.total_lines} lines in list') def preprocess(self, text: str) -> dict: '\n Preprocesses a sentence by tokenizing it.\n\n Args:\n text (str): Input sentence.\n\n Returns:\n dict: Tokenized sentence.\n ' self.current_line += 1 text = text.rstrip().strip() if (len(text) == 0): print(f'Warning: empty sentence at line {self.current_line}') return self.tokenizer(text, padding=False, truncation=True, max_length=self.max_length, return_tensors=None) def __iter__(self): mapped_itr = map(self.preprocess, self.sentences) return mapped_itr def __len__(self) -> int: return self.total_lines
class ParallelTextReader(IterableDataset): def __init__(self, predictions: List[str], references: List[str]): '\n Initializes the ParallelTextReader class.\n\n Args:\n predictions (List[str]): List of predicted sentences.\n references (List[str]): List of reference sentences.\n ' self.predictions = predictions self.references = references predictions_lines = count_lines(predictions) references_lines = count_lines(references) assert (predictions_lines == references_lines), f'Lines in predictions and references do not match {predictions_lines} vs {references_lines}' self.num_sentences = references_lines self.current_line = 0 def preprocess(self, pred: str, gold: str) -> Tuple[(str, List[str])]: '\n Preprocesses a predicted and a reference sentence by stripping them.\n\n Args:\n pred (str): Predicted sentence.\n gold (str): Reference sentence.\n\n Returns:\n Tuple[str, List[str]]: Tuple containing the predicted sentence and a list with the reference sentence.\n ' self.current_line += 1 pred = pred.rstrip().strip() gold = gold.rstrip().strip() if (len(pred) == 0): print(f'Warning: Pred empty sentence at line {self.current_line}') if (len(gold) == 0): print(f'Warning: Gold empty sentence at line {self.current_line}') return (pred, [gold]) def __iter__(self): mapped_itr = map(self.preprocess, self.predictions, self.references) return mapped_itr def __len__(self) -> int: return self.num_sentences
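A hedged usage sketch for DatasetReader: wrap it in a plain DataLoader the same way get_dataloader below does, here with an assumed HF checkpoint and toy sentences:

from torch.utils.data import DataLoader
from transformers import AutoTokenizer, DataCollatorForSeq2Seq

tokenizer = AutoTokenizer.from_pretrained('facebook/m2m100_418M')  # assumed checkpoint
dataset = DatasetReader(['Hello world.', 'Goodbye world.'], tokenizer, max_length=128)
collator = DataCollatorForSeq2Seq(tokenizer, padding=True, return_tensors='pt')
for batch in DataLoader(dataset, batch_size=2, collate_fn=collator):
    print(batch['input_ids'].shape)  # (2, longest-sequence-in-batch)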
def encode_string(text): return text.replace('\r', '\\r').replace('\n', '\\n').replace('\t', '\\t')
def get_dataloader(accelerator: Accelerator, translate_data, tokenizer: PreTrainedTokenizerBase, batch_size: int, max_length: int) -> DataLoader: dataset = DatasetReader(translate_data, tokenizer, max_length) if (accelerator.distributed_type == DistributedType.TPU): data_collator = DataCollatorForSeq2Seq(tokenizer, padding='max_length', max_length=max_length, label_pad_token_id=tokenizer.pad_token_id, return_tensors='pt') else: data_collator = DataCollatorForSeq2Seq(tokenizer, padding=True, label_pad_token_id=tokenizer.pad_token_id, pad_to_multiple_of=8, return_tensors='pt') return DataLoader(dataset, batch_size=batch_size, collate_fn=data_collator, num_workers=0)
def main(source_lang: str, target_lang: str, starting_batch_size: int, model_name: str = 'facebook/m2m100_1.2B', cache_dir: str = None, precision: str = '32', max_length: int = 128, num_beams: int = 4, num_return_sequences: int = 1, do_sample: bool = False, temperature: float = 1.0, top_k: int = 50, top_p: float = 1.0, keep_special_tokens: bool = False, sentences_path: str = None, output_path: str = None, sentences_list: List[str] = None, return_output: bool = False):
    if not return_output:
        os.makedirs(os.path.abspath(os.path.dirname(output_path)), exist_ok=True)
    accelerator = Accelerator(mixed_precision=(precision if precision != '32' else 'no'), split_batches=False, dispatch_batches=False)
    print(f'Loading tokenizer {model_name}...')
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=model_name, cache_dir=cache_dir)
    print(f'Loading model {model_name}...')
    model = AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path=model_name, cache_dir=cache_dir)
    model.eval()
    print('Preparing data...\n')
    if precision == '32':
        model = model.float()
    elif precision == 'fp16':
        model = model.half()
    elif precision == 'bf16':
        model = model.bfloat16()
    else:
        raise ValueError('Precision not supported. Supported values: 32, fp16, bf16')
    try:
        _ = tokenizer.lang_code_to_id[source_lang]
    except KeyError:
        raise KeyError(f'Language {source_lang} not found in tokenizer. Available languages: {tokenizer.lang_code_to_id.keys()}')
    tokenizer.src_lang = source_lang
    try:
        lang_code_to_idx = tokenizer.lang_code_to_id[target_lang]
    except KeyError:
        raise KeyError(f'Language {target_lang} not found in tokenizer. Available languages: {tokenizer.lang_code_to_id.keys()}')
    gen_kwargs = {'max_length': max_length, 'num_beams': num_beams, 'num_return_sequences': num_return_sequences, 'do_sample': do_sample, 'temperature': temperature, 'top_k': top_k, 'top_p': top_p}
    # When reading from a file, count_lines is assumed to be the file-based
    # variant of the helper (the list-based one is defined above).
    total_lines: int = count_lines(sentences_path) if sentences_list is None else len(sentences_list)
    if accelerator.is_main_process:
        print(f'** Translation **\n'
              f'Input file: {sentences_path}\n'
              f'Output file: {output_path}\n'
              f'Source language: {source_lang}\n'
              f'Target language: {target_lang}\n'
              f'Starting batch size: {starting_batch_size}\n'
              f'Device: {str(accelerator.device).split(":")[0]}\n'
              f'Num. Devices: {accelerator.num_processes}\n'
              f'Distributed_type: {accelerator.distributed_type}\n'
              f'Max length: {max_length}\n'
              f'Precision: {model.dtype}\n'
              f'Model: {model_name}\n')
        print('** Generation parameters **')
        print('\n'.join(f'{k}: {v}' for (k, v) in gen_kwargs.items()))
        print('\n')

    def save_sentences(tgt_text: list):
        nonlocal return_output, output_path
        if return_output:
            save_sentences.sentences.extend(tgt_text)
        else:
            print('\n'.join(tgt_text), file=save_sentences.f)

    if not return_output:
        save_sentences.f = open(output_path, 'w', encoding='utf-8')
    save_sentences.sentences = []

    @find_executable_batch_size(starting_batch_size=starting_batch_size)
    def inference(batch_size):
        nonlocal model, tokenizer, sentences_path, max_length, output_path, lang_code_to_idx, gen_kwargs, precision, sentences_list, return_output
        print(f'Translating with batch size {batch_size}')
        translate_data = sentences_path if sentences_list is None else sentences_list
        data_loader = get_dataloader(accelerator=accelerator, translate_data=translate_data, tokenizer=tokenizer, batch_size=batch_size, max_length=max_length)
        (model, data_loader) = accelerator.prepare(model, data_loader)
        samples_seen: int = 0
        with tqdm(total=total_lines, desc='Dataset translation', leave=True, ascii=True, disable=(not accelerator.is_main_process)) as pbar:
            with torch.no_grad():
                for (step, batch) in enumerate(data_loader):
                    generated_tokens = accelerator.unwrap_model(model).generate(**batch, forced_bos_token_id=lang_code_to_idx, **gen_kwargs)
                    generated_tokens = accelerator.pad_across_processes(generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
                    generated_tokens = accelerator.gather(generated_tokens).cpu().numpy()
                    tgt_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=(not keep_special_tokens))
                    if accelerator.is_main_process:
                        # Trim the padding duplicates gather adds on the last batch.
                        if step == (math.ceil(math.ceil(total_lines / batch_size) / accelerator.num_processes) - 1):
                            tgt_text = tgt_text[:((total_lines * num_return_sequences) - samples_seen)]
                        else:
                            samples_seen += len(tgt_text)
                        save_sentences([encode_string(sentence) for sentence in tgt_text])
                        pbar.update(len(tgt_text) // gen_kwargs['num_return_sequences'])

    inference()
    print('Translation done.\n')
    if return_output:
        return save_sentences.sentences
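A hedged example invocation of the translation entry point above; it downloads the 1.2B checkpoint on first use, the language codes follow M2M-100 conventions, and the input sentence is a placeholder:

translations = main(
    source_lang='de',
    target_lang='en',
    starting_batch_size=64,
    model_name='facebook/m2m100_1.2B',
    precision='fp16',
    sentences_list=['Die Katze sitzt auf der Matte.'],
    return_output=True,
)
print(translations)  # ['The cat is sitting on the mat.'] (approximately)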
def get_dataset(dataset_args: Dict[(str, Any)]) -> DatasetDict: '\n Load the dataset specified in dataset_args and return a DatasetDict object.\n\n Args:\n - dataset_args: A dictionary containing the dataset name, dataset configurations, dataset split.\n\n Returns:\n - A DatasetDict object containing the loaded dataset.\n ' dataset = DatasetDict() for config in dataset_args['dataset_configs']: dataset[config] = load_dataset(dataset_args['dataset'], config, split=dataset_args['dataset_split']) return dataset
def get_texts(dataset: DatasetDict, dataset_args: Dict[(str, Any)]) -> Dict[(str, Dict[(str, Any)])]: '\n Extract the texts from the dataset and return a dictionary containing the texts.\n\n Args:\n - dataset: A DatasetDict object containing the loaded dataset.\n - dataset_args: A dictionary containing the dataset configurations.\n\n Returns:\n - A dictionary containing the texts extracted from the dataset.\n ' texts = defaultdict(dict) for config in dataset_args['dataset_configs']: for field in dataset_args['dataset_fields']: texts[config][field] = dataset[config][field] return texts
def get_few_shot_dataset(dataset_args: Dict[(str, Any)]) -> DatasetDict: '\n Load the few-shot dataset specified in dataset_args and return a DatasetDict object.\n\n Args:\n - dataset_args: A dictionary containing the few-shot dataset configurations.\n\n Returns:\n - A DatasetDict object containing the loaded few-shot dataset.\n ' dataset = DatasetDict() dataset['en'] = load_dataset('facebook/flores', 'eng_Latn', split='dev') for config in dataset_args['dataset_configs']: dataset[config] = load_dataset('facebook/flores', dataset_args['lang_codes'][config], split='dev') return dataset
def get_few_shot_prompts(dataset: DatasetDict, dataset_args: Dict[(str, Any)], translate_args: Dict[(str, Any)], shots: int) -> Dict[(str, str)]: '\n Generate few-shot prompts for each language in dataset_args and return a dictionary containing the prompts.\n\n Args:\n - dataset: A DatasetDict object containing the few-shot dataset.\n - dataset_args: A dictionary containing the dataset configurations.\n - translate_args: A dictionary containing the translation configurations.\n - shots: An integer representing the number of few-shot prompts to generate.\n\n Returns:\n - A dictionary containing the few-shot prompts for each language.\n ' prompts = {} for config in dataset_args['dataset_configs']: prompts[config] = '' (i, shot) = (0, 0) while (shot < shots): if (len(dataset[config][i]['sentence']) < 100): prompts[config] += f"{dataset_args['lang_names'][config]}: {dataset[config][i]['sentence']}{translate_args['eos_token']}" prompts[config] += f"English: {dataset['en'][i]['sentence']}{translate_args['eos_token']}" shot += 1 i += 1 prompts[config] += f"{dataset_args['lang_names'][config]}:" return prompts
def text_with_prompt(text: str, prompt: str, translate_args: Dict[(str, Any)]) -> str: '\n Concatenate the text with the prompt and the eos_token.\n\n Args:\n - text: A string representing the text to be concatenated.\n - prompt: A string representing the prompt to be concatenated.\n - translate_args: A dictionary containing the translation configurations.\n\n Returns:\n - A string representing the concatenated text with the prompt and the eos_token.\n ' return f"{prompt} {text}{translate_args['eos_token']}English:"
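A small, self-contained illustration of the self-translate prompt format the two helpers above build; the language name, sentences, and eos token are assumptions for the example only:

translate_args = {'eos_token': '\n'}
prompt = ('Swahili: Paka ameketi kwenye mkeka.\n'
          'English: The cat is sitting on the mat.\n'
          'Swahili:')
print(text_with_prompt('Mbwa anakimbia.', prompt, translate_args))
# Swahili: Paka ameketi kwenye mkeka.
# English: The cat is sitting on the mat.
# Swahili: Mbwa anakimbia.
# English: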
def map_texts_with_prompts(texts: Dict[str, Dict[str, List[str]]], prompts: Dict[str, str], translate_args: Dict[str, Any]) -> Dict[str, Dict[str, List[str]]]:
    '''
    Map the texts with the prompts.

    Args:
    - texts: A dictionary containing the texts to be mapped.
    - prompts: A dictionary containing the prompts to be mapped.
    - translate_args: A dictionary containing the translation configurations.

    Returns:
    - A dictionary containing the mapped texts with the prompts.
    '''
    texts_with_prompts = defaultdict(dict)
    for config in texts:
        # Iterate the fields present in `texts` itself; the original referenced
        # a module-level `dataset_args` that is undefined in this scope.
        for field in texts[config]:
            texts_with_prompts[config][field] = [text_with_prompt(text, prompt=prompts[config], translate_args=translate_args) for text in texts[config][field]]
    return texts_with_prompts