def beamsearch_hp(datapath, benchmark, backbone, thres, alpha, logpath,
                  candidate_base, candidate_layers, beamsize, maxdepth):
    """Implementation of beam search for hyperpixel layers"""
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = hpflow.HyperpixelFlow(backbone, '0', benchmark, device)
    download.download_dataset(os.path.abspath(datapath), benchmark)
    dset = download.load_dataset(benchmark, datapath, thres, device, 'val')
    dataloader = DataLoader(dset, batch_size=1, num_workers=0)

    membuf_cand = []
    for base in candidate_base:
        start = time.time()
        hyperpixel = parse_layers(base)
        score = evaluate.run(datapath, benchmark, backbone, thres, alpha,
                             hyperpixel, logpath, True, model, dataloader)
        log_evaluation(base, score, time.time() - start)
        membuf_cand.append((score, base))

    membuf_topk = find_topk(membuf_cand, beamsize)
    score_sel, layer_sel = find_topk(membuf_cand, 1)[0]
    log_selected(0, membuf_topk)

    for depth in range(1, maxdepth):
        membuf_cand = []
        for _, test_layer in membuf_topk:
            for cand_layer in candidate_layers:
                if cand_layer not in test_layer and cand_layer > min(test_layer):
                    start = time.time()
                    test_layers = sorted(test_layer + [cand_layer])
                    if test_layers in list(map(lambda x: x[1], membuf_cand)):
                        break
                    hyperpixel = parse_layers(test_layers)
                    score = evaluate.run(datapath, benchmark, backbone, thres, alpha,
                                         hyperpixel, logpath, True, model, dataloader)
                    log_evaluation(test_layers, score, time.time() - start)
                    membuf_cand.append((score, test_layers))
        membuf_topk = find_topk(membuf_cand, beamsize)
        score_tmp, layer_tmp = find_topk(membuf_cand, 1)[0]
        if score_tmp > score_sel:
            layer_sel = layer_tmp
            score_sel = score_tmp
        log_selected(depth, membuf_topk)

    logging.info('\nBest layers, score: %s %5.3f' % (layer_sel, score_sel))
    return layer_sel
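# Hedged usage sketch of beamsearch_hp; every argument value below is an
# illustrative assumption (paths, candidate pools, and beam settings are not
# canonical values from the source).
best_layers = beamsearch_hp(datapath='../Datasets_HPF', benchmark='pfpascal',
                            backbone='resnet101', thres='auto', alpha=0.1,
                            logpath='beamsearch_log', candidate_base=[[0]],
                            candidate_layers=list(range(1, 17)), beamsize=4,
                            maxdepth=8)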
class CorrespondenceDataset(Dataset):
    """Parent class of PFPascal, PFWillow, Caltech, and SPair"""
    def __init__(self, benchmark, datapath, thres, device, split):
        """CorrespondenceDataset constructor"""
        super(CorrespondenceDataset, self).__init__()
        self.metadata = {
            'pfwillow': ('PF-WILLOW', 'test_pairs.csv', '', '', 'bbox'),
            'pfpascal': ('PF-PASCAL', '_pairs.csv', 'JPEGImages', 'Annotations', 'img'),
            'caltech': ('Caltech-101', 'test_pairs_caltech_with_category.csv',
                        '101_ObjectCategories', '', ''),
            'spair': ('SPair-71k', 'Layout/large', 'JPEGImages', 'PairAnnotation', 'bbox')
        }

        base_path = os.path.join(os.path.abspath(datapath), self.metadata[benchmark][0])
        if benchmark == 'pfpascal':
            self.spt_path = os.path.join(base_path, split + '_pairs.csv')
        elif benchmark == 'spair':
            self.spt_path = os.path.join(base_path, self.metadata[benchmark][1], split + '.txt')
        else:
            self.spt_path = os.path.join(base_path, self.metadata[benchmark][1])
        self.img_path = os.path.join(base_path, self.metadata[benchmark][2])
        if benchmark == 'spair':
            self.ann_path = os.path.join(base_path, self.metadata[benchmark][3], split)
        else:
            self.ann_path = os.path.join(base_path, self.metadata[benchmark][3])

        self.thres = self.metadata[benchmark][4] if thres == 'auto' else thres
        self.transform = Normalize(['src_img', 'trg_img'])
        self.device = device
        self.split = split
        self.src_imnames = []
        self.trg_imnames = []
        self.train_data = []
        self.src_kps = []
        self.trg_kps = []
        self.cls_ids = []
        self.cls = []

    def __len__(self):
        """Returns the number of pairs"""
        return len(self.train_data)

    def __getitem__(self, idx):
        """Construct and return a batch"""
        sample = dict()
        sample['src_imname'] = self.src_imnames[idx]
        sample['trg_imname'] = self.trg_imnames[idx]
        sample['pair_classid'] = self.cls_ids[idx]
        sample['pair_class'] = self.cls[sample['pair_classid']]
        sample['src_img'] = self.get_image(self.src_imnames, idx)
        sample['trg_img'] = self.get_image(self.trg_imnames, idx)
        sample['src_kps'] = self.get_points(self.src_kps, idx).to(self.device)
        sample['trg_kps'] = self.get_points(self.trg_kps, idx).to(self.device)
        sample['datalen'] = len(self.train_data)
        if self.transform:
            sample = self.transform(sample)
        sample['src_img'] = sample['src_img'].to(self.device)
        sample['trg_img'] = sample['trg_img'].to(self.device)
        return sample

    def get_image(self, img_names, idx):
        """Return image tensor"""
        img_name = os.path.join(self.img_path, img_names[idx])
        image = self.get_imarr(img_name)
        image = torch.tensor(image.transpose(2, 0, 1).astype(np.float32))
        return image

    def get_pckthres(self, sample):
        """Compute PCK threshold"""
        if self.thres == 'bbox':
            trg_bbox = sample['trg_bbox']
            return torch.max(trg_bbox[2] - trg_bbox[0], trg_bbox[3] - trg_bbox[1])
        elif self.thres == 'img':
            return torch.tensor(max(sample['trg_img'].size(1), sample['trg_img'].size(2)))
        else:
            raise Exception('Invalid pck evaluation level: %s' % self.thres)

    def get_points(self, pts, idx):
        """Return key-points of an image"""
        return pts[idx]

    def get_imarr(self, path):
        """Read a single image file as numpy array from path"""
        return np.array(Image.open(path).convert('RGB'))
class UnNormalize:
    """Image unnormalization"""
    def __init__(self):
        self.mean = [0.485, 0.456, 0.406]
        self.std = [0.229, 0.224, 0.225]

    def __call__(self, image):
        img = image.clone()
        for im_channel, mean, std in zip(img, self.mean, self.std):
            im_channel.mul_(std).add_(mean)
        return img
class Normalize:
    """Image normalization"""
    def __init__(self, image_keys, norm_range=True):
        self.image_keys = image_keys
        self.norm_range = norm_range
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])

    def __call__(self, sample):
        for key in self.image_keys:
            if self.norm_range:
                sample[key] /= 255.0
            sample[key] = self.normalize(sample[key])
        return sample
def load_dataset(benchmark, datapath, thres, device, split='test'):
    """Instantiate desired correspondence dataset"""
    correspondence_benchmark = {
        'pfpascal': pfpascal.PFPascalDataset,
        'pfwillow': pfwillow.PFWillowDataset,
        'caltech': caltech.CaltechDataset,
        'spair': spair.SPairDataset
    }
    dataset = correspondence_benchmark.get(benchmark)
    if dataset is None:
        raise Exception('Invalid benchmark dataset %s.' % benchmark)
    return dataset(benchmark, datapath, thres, device, split)
def download_from_google(token_id, filename):
    """Download desired filename from Google drive"""
    print('Downloading %s ...' % os.path.basename(filename))

    url = 'https://docs.google.com/uc?export=download'
    destination = filename + '.tar.gz'
    session = requests.Session()
    response = session.get(url, params={'id': token_id, 'confirm': 't'}, stream=True)
    token = get_confirm_token(response)
    if token:
        params = {'id': token_id, 'confirm': token}
        response = session.get(url, params=params, stream=True)
    save_response_content(response, destination)

    file = tarfile.open(destination, 'r:gz')
    print('Extracting %s ...' % destination)
    file.extractall(filename)
    file.close()

    os.remove(destination)
    os.rename(filename, filename + '_tmp')
    os.rename(os.path.join(filename + '_tmp', os.path.basename(filename)), filename)
    os.rmdir(filename + '_tmp')
def get_confirm_token(response):
    """Retrieve confirm token"""
    for key, value in response.cookies.items():
        if key.startswith('download_warning'):
            return value
    return None
def save_response_content(response, destination):
    """Save the response to the destination"""
    chunk_size = 32768
    with open(destination, 'wb') as file:
        for chunk in response.iter_content(chunk_size):
            if chunk:
                file.write(chunk)
def download_dataset(datapath, benchmark):
    """Download desired semantic correspondence benchmark dataset from Google drive"""
    if not os.path.isdir(datapath):
        os.mkdir(datapath)

    file_data = {
        'pfwillow': ('1tDP0y8RO5s45L-vqnortRaieiWENQco_', 'PF-WILLOW'),
        'pfpascal': ('1OOwpGzJnTsFXYh-YffMQ9XKM_Kl_zdzg', 'PF-PASCAL'),
        'caltech': ('1IV0E5sJ6xSdDyIvVSTdZjPHELMwGzsMn', 'Caltech-101'),
        'spair': ('1KSvB0k2zXA06ojWNvFjBv0Ake426Y76k', 'SPair-71k')
    }
    file_id, filename = file_data[benchmark]
    abs_filepath = os.path.join(datapath, filename)
    if not os.path.isdir(abs_filepath):
        download_from_google(file_id, abs_filepath)
class SPairDataset(CorrespondenceDataset):
    """Inherits CorrespondenceDataset"""
    def __init__(self, benchmark, datapath, thres, device, split):
        """SPair-71k dataset constructor"""
        super(SPairDataset, self).__init__(benchmark, datapath, thres, device, split)

        self.train_data = open(self.spt_path).read().split('\n')
        self.train_data = self.train_data[:len(self.train_data) - 1]
        self.src_imnames = list(map(lambda x: x.split('-')[1] + '.jpg', self.train_data))
        self.trg_imnames = list(map(lambda x: x.split('-')[2].split(':')[0] + '.jpg', self.train_data))
        self.cls = os.listdir(self.img_path)
        self.cls.sort()

        anntn_files = []
        for data_name in self.train_data:
            anntn_files.append(glob.glob('%s/%s.json' % (self.ann_path, data_name))[0])
        anntn_files = list(map(lambda x: json.load(open(x)), anntn_files))
        self.src_kps = list(map(lambda x: torch.tensor(x['src_kps']), anntn_files))
        self.trg_kps = list(map(lambda x: torch.tensor(x['trg_kps']), anntn_files))
        self.src_bbox = list(map(lambda x: torch.tensor(x['src_bndbox']), anntn_files))
        self.trg_bbox = list(map(lambda x: torch.tensor(x['trg_bndbox']), anntn_files))
        self.cls_ids = list(map(lambda x: self.cls.index(x['category']), anntn_files))
        self.vpvar = list(map(lambda x: torch.tensor(x['viewpoint_variation']), anntn_files))
        self.scvar = list(map(lambda x: torch.tensor(x['scale_variation']), anntn_files))
        self.trncn = list(map(lambda x: torch.tensor(x['truncation']), anntn_files))
        self.occln = list(map(lambda x: torch.tensor(x['occlusion']), anntn_files))

    def __getitem__(self, idx):
        """Construct and return a batch for SPair-71k dataset"""
        sample = super(SPairDataset, self).__getitem__(idx)
        sample['src_bbox'] = self.src_bbox[idx].to(self.device)
        sample['trg_bbox'] = self.trg_bbox[idx].to(self.device)
        sample['pckthres'] = self.get_pckthres(sample).to(self.device)
        sample['vpvar'] = self.vpvar[idx]
        sample['scvar'] = self.scvar[idx]
        sample['trncn'] = self.trncn[idx]
        sample['occln'] = self.occln[idx]
        return sample

    def get_image(self, img_names, idx):
        """Return image tensor"""
        img_name = os.path.join(self.img_path, self.cls[self.cls_ids[idx]], img_names[idx])
        image = self.get_imarr(img_name)
        image = torch.tensor(image.transpose(2, 0, 1).astype(np.float32))
        return image

    def get_pckthres(self, sample):
        """Compute PCK threshold"""
        return super(SPairDataset, self).get_pckthres(sample)

    def get_points(self, pts, idx):
        """Return key-points of an image"""
        return super(SPairDataset, self).get_points(pts, idx).t()
def run(datapath, benchmark, backbone, thres, alpha, hyperpixel,
        logpath, beamsearch, model=None, dataloader=None, visualize=False):
    """Runs Hyperpixel Flow framework"""
    if not os.path.isdir('logs'):
        os.mkdir('logs')
    if not beamsearch:
        cur_datetime = datetime.datetime.now().__format__('_%m%d_%H%M%S')
        logfile = os.path.join('logs', logpath + cur_datetime + '.log')
        util.init_logger(logfile)
        util.log_args(args)
        if visualize:
            os.mkdir(logfile + 'vis')

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    if dataloader is None:
        download.download_dataset(os.path.abspath(datapath), benchmark)
        split = 'val' if beamsearch else 'test'
        dset = download.load_dataset(benchmark, datapath, thres, device, split)
        dataloader = DataLoader(dset, batch_size=1, num_workers=0)
    if model is None:
        model = hpflow.HyperpixelFlow(backbone, hyperpixel, benchmark, device)
    else:
        model.hyperpixel_ids = util.parse_hyperpixel(hyperpixel)

    evaluator = evaluation.Evaluator(benchmark, device)
    for idx, data in enumerate(dataloader):
        data['src_img'], data['src_kps'], data['src_intratio'] = util.resize(data['src_img'], data['src_kps'][0])
        data['trg_img'], data['trg_kps'], data['trg_intratio'] = util.resize(data['trg_img'], data['trg_kps'][0])
        data['alpha'] = alpha

        with torch.no_grad():
            confidence_ts, src_box, trg_box = model(data['src_img'], data['trg_img'])
        prd_kps = geometry.predict_kps(src_box, trg_box, data['src_kps'], confidence_ts)

        evaluator.evaluate(prd_kps, data)
        if not beamsearch:
            evaluator.log_result(idx, data=data)
            if visualize:
                vispath = os.path.join(logfile + 'vis',
                                       '%03d_%s_%s' % (idx, data['src_imname'][0], data['trg_imname'][0]))
                util.visualize_prediction(data['src_kps'].t().cpu(), prd_kps.t().cpu(),
                                          data['src_img'], data['trg_img'], vispath)

    if beamsearch:
        return (sum(evaluator.eval_buf['pck']) / len(evaluator.eval_buf['pck'])) * 100.0
    else:
        evaluator.log_result(len(dset), data=None, average=True)
class Evaluator:
    """To evaluate and log evaluation metrics: PCK, LT-ACC, IoU"""
    def __init__(self, benchmark, device):
        """Constructor for Evaluator"""
        self.eval_buf = {
            'pfwillow': {'pck': [], 'cls_pck': dict()},
            'pfpascal': {'pck': [], 'cls_pck': dict()},
            'spair': {'pck': [], 'cls_pck': dict()},
            'caltech': {'ltacc': [], 'iou': []}
        }
        self.eval_funct = {
            'pfwillow': self.eval_pck, 'pfpascal': self.eval_pck,
            'spair': self.eval_pck, 'caltech': self.eval_caltech
        }
        self.log_funct = {
            'pfwillow': self.log_pck, 'pfpascal': self.log_pck,
            'spair': self.log_pck, 'caltech': self.log_caltech
        }
        self.eval_buf = self.eval_buf[benchmark]
        self.eval_funct = self.eval_funct[benchmark]
        self.log_funct = self.log_funct[benchmark]
        self.benchmark = benchmark
        self.device = device

    def evaluate(self, prd_kps, data):
        """Compute desired evaluation metric"""
        return self.eval_funct(prd_kps, data)

    def log_result(self, idx, data, average=False):
        """Print results: PCK, or LT-ACC & IoU"""
        return self.log_funct(idx, data, average)

    def eval_pck(self, prd_kps, data):
        """Compute percentage of correct key-points (PCK) based on prediction"""
        pckthres = data['pckthres'][0] * data['trg_intratio']
        ncorrt = correct_kps(data['trg_kps'].cuda(), prd_kps, pckthres, data['alpha'])
        pair_pck = int(ncorrt) / int(data['trg_kps'].size(1))

        self.eval_buf['pck'].append(pair_pck)
        if self.eval_buf['cls_pck'].get(data['pair_class'][0]) is None:
            self.eval_buf['cls_pck'][data['pair_class'][0]] = []
        self.eval_buf['cls_pck'][data['pair_class'][0]].append(pair_pck)

    def log_pck(self, idx, data, average):
        """Log percentage of correct key-points (PCK)"""
        if average:
            pck = sum(self.eval_buf['pck']) / len(self.eval_buf['pck'])
            for cls in self.eval_buf['cls_pck']:
                cls_avg = sum(self.eval_buf['cls_pck'][cls]) / len(self.eval_buf['cls_pck'][cls])
                logging.info('%15s: %3.3f' % (cls, cls_avg))
            logging.info(' * Average: %3.3f' % pck)
            return pck

        logging.info('[%5d/%5d]: \t [Pair PCK: %3.3f]\t[Average: %3.3f] %s' %
                     (idx + 1, data['datalen'], self.eval_buf['pck'][idx],
                      sum(self.eval_buf['pck']) / len(self.eval_buf['pck']),
                      data['pair_class'][0]))
        return None

    def eval_caltech(self, prd_kps, data):
        """Compute LT-ACC and IoU based on transferred points"""
        imsize = list(data['trg_img'].size())[1:]
        trg_xstr, trg_ystr = pts2ptstr(data['trg_kps'])
        trg_mask = ptstr2mask(trg_xstr, trg_ystr, imsize[0], imsize[1])
        prd_xstr, prd_ystr = pts2ptstr(prd_kps)
        prd_mask = ptstr2mask(prd_xstr, prd_ystr, imsize[0], imsize[1])

        lt_acc = label_transfer_accuracy(prd_mask, trg_mask)
        iou = intersection_over_union(prd_mask, trg_mask)
        self.eval_buf['ltacc'].append(lt_acc)
        self.eval_buf['iou'].append(iou)

    def log_caltech(self, idx, data, average):
        """Log Caltech-101 dataset evaluation metrics: LT-ACC and IoU"""
        if average:
            lt_acc = sum(self.eval_buf['ltacc']) / len(self.eval_buf['ltacc'])
            segiou = sum(self.eval_buf['iou']) / len(self.eval_buf['iou'])
            logging.info(' * Average LT-ACC: %3.2f' % lt_acc)
            logging.info(' * Average IoU: %3.2f' % segiou)
            return lt_acc, segiou

        logging.info('[%5d/%5d]: \t [LT-ACC/IoU: %5.2f/%.2f]\t[Average: %5.2f/%.2f]' %
                     (idx + 1, data['datalen'],
                      self.eval_buf['ltacc'][idx], self.eval_buf['iou'][idx],
                      sum(self.eval_buf['ltacc']) / len(self.eval_buf['ltacc']),
                      sum(self.eval_buf['iou']) / len(self.eval_buf['iou'])))
        return None
def correct_kps(trg_kps, prd_kps, pckthres, alpha=0.1):
    """Compute the number of correctly transferred key-points"""
    l2dist = torch.pow(torch.sum(torch.pow(trg_kps - prd_kps, 2), 0), 0.5)
    thres = pckthres.expand_as(l2dist).float()
    correct_pts = torch.le(l2dist, thres * alpha)
    return torch.sum(correct_pts)
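# Toy sanity check for correct_kps (hand-picked values): the first two
# predictions land within alpha * pckthres = 10 px of their targets, the
# third misses by ~28 px, so the returned count is 2.
import torch

trg = torch.tensor([[0., 10., 20.], [0., 10., 20.]])  # 2 x N target keypoints
prd = torch.tensor([[1., 10., 40.], [0., 11., 40.]])  # 2 x N predicted keypoints
print(correct_kps(trg, prd, pckthres=torch.tensor(100.), alpha=0.1))  # tensor(2)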
def pts2ptstr(pts):
    """Convert tensor of points to string"""
    x_str = str(list(pts[0].cpu().numpy()))
    x_str = x_str[1:len(x_str) - 1]
    y_str = str(list(pts[1].cpu().numpy()))
    y_str = y_str[1:len(y_str) - 1]
    return x_str, y_str
def pts2mask(x_pts, y_pts, shape):
    """Build a binary mask tensor based on given xy-points"""
    x_idx, y_idx = draw.polygon(x_pts, y_pts, shape)
    mask = np.zeros(shape, dtype=bool)  # np.bool is deprecated; use the builtin
    mask[x_idx, y_idx] = True
    return mask
def ptstr2mask(x_str, y_str, out_h, out_w):
    """Convert xy-point mask (string) to tensor mask"""
    x_pts = np.fromstring(x_str, sep=',')
    y_pts = np.fromstring(y_str, sep=',')
    mask_np = pts2mask(y_pts, x_pts, [out_h, out_w])
    mask = torch.tensor(mask_np.astype(np.float32)).unsqueeze(0).unsqueeze(0).float()
    return mask
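# Round-trip sketch of the Caltech mask pipeline above: keypoints become
# comma-separated coordinate strings, which are rasterized into a polygon
# mask of shape 1 x 1 x out_h x out_w. The point values are made up.
import torch

x_str, y_str = pts2ptstr(torch.tensor([[10., 50., 50.], [10., 10., 50.]]))
mask = ptstr2mask(x_str, y_str, out_h=64, out_w=64)
print(mask.shape)  # torch.Size([1, 1, 64, 64])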
def intersection_over_union(mask1, mask2):
    """Computes IoU between two masks"""
    rel_part_weight = torch.sum(torch.sum(mask2.gt(0.5).float(), 2, True), 3, True) / \
                      torch.sum(mask2.gt(0.5).float())
    part_iou = torch.sum(torch.sum((mask1.gt(0.5) & mask2.gt(0.5)).float(), 2, True), 3, True) / \
               torch.sum(torch.sum((mask1.gt(0.5) | mask2.gt(0.5)).float(), 2, True), 3, True)
    weighted_iou = torch.sum(torch.mul(rel_part_weight, part_iou)).item()
    return weighted_iou
def label_transfer_accuracy(mask1, mask2):
    """LT-ACC measures the overlap with emphasis on the background class"""
    return torch.mean((mask1.gt(0.5) == mask2.gt(0.5)).double()).item()
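# Hand-checkable example for the two Caltech metrics: the masks share one of
# their two foreground pixels, so IoU = 1/2, and 3 of 4 pixels agree overall,
# so LT-ACC = 0.75.
import torch

m1 = torch.tensor([[[[1., 1.], [0., 0.]]]])
m2 = torch.tensor([[[[1., 0.], [0., 0.]]]])
print(intersection_over_union(m1, m2))   # 0.5
print(label_transfer_accuracy(m1, m2))   # 0.75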
def init_logger(logfile):
    """Initialize logging settings"""
    logging.basicConfig(filemode='w', filename=logfile, level=logging.INFO,
                        format='%(message)s', datefmt='%m-%d %H:%M:%S')
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
def log_args(args):
    """Log program arguments"""
    logging.info('\n+========== Hyperpixel Flow Arguments ===========+')
    for arg_key in args.__dict__:
        logging.info('| %20s: %-24s |' % (arg_key, str(args.__dict__[arg_key])))
    logging.info('+================================================+\n')
def resize(img, kps, side_thres=300):
    """Resize given image with imsize: (1, 3, H, W)"""
    imsize = torch.tensor(img.size()).float()
    kps = kps.float()
    side_max = torch.max(imsize)
    inter_ratio = 1.0
    if side_max > side_thres:
        inter_ratio = side_thres / side_max
        img = F.interpolate(img,
                            size=(int(imsize[2] * inter_ratio), int(imsize[3] * inter_ratio)),
                            mode='bilinear', align_corners=False)
        kps *= inter_ratio
    return img.squeeze(0), kps, inter_ratio
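# Behavior sketch for resize: a 1 x 3 x 400 x 600 image exceeds side_thres=300
# on its longest side, so it is rescaled by 300/600 = 0.5 and the keypoints
# are scaled by the same ratio. Values are illustrative.
import torch

img = torch.zeros(1, 3, 400, 600)
kps = torch.tensor([[100., 200.], [50., 80.]])
img_r, kps_r, ratio = resize(img, kps)
print(img_r.shape, ratio)  # torch.Size([3, 200, 300]) tensor(0.5000)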
def where(predicate):
    """Returns indices which match given predicate"""
    matching_idx = predicate.nonzero()
    n_match = len(matching_idx)
    if n_match != 0:
        matching_idx = matching_idx.t().squeeze(0)
    return matching_idx
def intersect1d(tensor1, tensor2):
    """Takes two 1D tensor and returns tensor of common values"""
    aux = torch.cat((tensor1, tensor2), dim=0)
    aux = aux.sort()[0]
    return aux[:-1][(aux[1:] == aux[:-1]).data]
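# Quick check of intersect1d: after concatenating and sorting, duplicated
# neighbors mark the intersection of the two (duplicate-free) inputs.
import torch

print(intersect1d(torch.tensor([1, 3, 5]), torch.tensor([3, 5, 7])))  # tensor([3, 5])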
def parse_hyperpixel(hyperpixel_ids):
    """Parse given hyperpixel list (string -> int)"""
    return list(map(int, re.findall(r'\d+', hyperpixel_ids)))
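# parse_hyperpixel simply pulls every integer out of the string form, e.g.:
print(parse_hyperpixel('(2, 17, 21, 22, 25, 26, 28)'))  # [2, 17, 21, 22, 25, 26, 28]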
def visualize_prediction(src_kps, prd_kps, src_img, trg_img, vispath, relaxation=2000):
    """TPS transform source image using predicted correspondences"""
    src_imsize = src_img.size()[1:][::-1]
    trg_imsize = trg_img.size()[1:][::-1]
    img_tps = geometry.ImageTPS(src_kps, prd_kps, src_imsize, trg_imsize, relaxation)
    wrp_img = ff.to_pil_image(img_tps(unnorm(src_img.cpu())))
    trg_img = ff.to_pil_image(unnorm(trg_img.cpu()))
    new_im = Image.new('RGB', (trg_imsize[0] * 2, trg_imsize[1]))
    new_im.paste(wrp_img, (0, 0))
    new_im.paste(trg_img, (trg_imsize[0], 0))
    new_im.save(vispath)
class GroundTruthDatasetFactory(Dataset):
    """
    Factory to create projection datasets from any 2D image-data.

    This is essentially a simple version of dival [1] without any noise contribution.

    References:
        [1] Johannes Leuschner, Maximilian Schmidt, Daniel Otero Baguer, and Peter Maaß.
        The LoDoPaB-CT dataset: A benchmark dataset for low-dose CT reconstruction methods.
        arXiv preprint arXiv:1910.01113, 2019.
    """
    def __init__(self, train_gt_images, val_gt_images, test_gt_images, inner_circle=True):
        """
        Note: Currently only odd-sized images are supported.

        :param train_gt_images:
        :param val_gt_images:
        :param test_gt_images:
        :param inner_circle: all pixels outside the largest circle around the center are set
            to zero, i.e. the detector length is equal to the image height
        """
        self.train_gt_images = train_gt_images
        self.val_gt_images = val_gt_images
        self.test_gt_images = test_gt_images
        assert self.train_gt_images.shape[1] == self.train_gt_images.shape[2], 'Train images are not square.'
        assert self.train_gt_images.shape[1] % 2 == 1, 'Train image size has to be odd.'
        assert self.val_gt_images.shape[1] == self.val_gt_images.shape[2], 'Val images are not square.'
        assert self.val_gt_images.shape[1] % 2 == 1, 'Val image size has to be odd.'
        assert self.test_gt_images.shape[1] == self.test_gt_images.shape[2], 'Test images are not square.'
        assert self.test_gt_images.shape[1] % 2 == 1, 'Test image size has to be odd.'
        self.shape = (self.train_gt_images.shape[1], self.train_gt_images.shape[2])
        self.inner_circle = inner_circle
        if self.inner_circle:
            circ_space = np.sqrt((self.shape[0] / 2.0) ** 2 / 2.0)
            min_pt = [-circ_space, -circ_space]
            max_pt = [circ_space, circ_space]
        else:
            min_pt = [-self.shape[0] / 2.0, -self.shape[1] / 2.0]
            max_pt = [self.shape[0] / 2.0, self.shape[1] / 2.0]
        space = uniform_discr(min_pt, max_pt, self.shape, dtype=np.float32)
        self.train_len = self.train_gt_images.shape[0]
        self.validation_len = self.val_gt_images.shape[0]
        self.test_len = self.test_gt_images.shape[0]
        self.random_access = True
        super().__init__(space=space)

    def _create_pair_dataset(self, forward_op, post_processor=None, noise_type=None,
                             noise_kwargs=None, noise_seeds=None):
        dataset = ObservationGroundTruthPairDataset(
            self.generator, forward_op, post_processor=post_processor,
            train_len=self.train_len, validation_len=self.validation_len,
            test_len=self.test_len, noise_type=noise_type,
            noise_kwargs=noise_kwargs, noise_seeds=noise_seeds)
        return dataset

    def build_projection_dataset(self, num_angles, upscale_shape=70, impl='astra_cpu'):
        """
        Builds the forward projection operator. The ground truth images are upscaled during
        the forward operation to avoid the [inverse crime](https://arxiv.org/abs/math-ph/0401050).

        :param num_angles: number of projection angles
        :param upscale_shape: to avoid inverse crime
        :param impl: radon transform implementation
        :return:
        """
        forward_op, get_reco_ray_trafo, reco_ray_trafo = self._build_forward_op(upscale_shape, impl, num_angles)
        ds = self._create_pair_dataset(forward_op=forward_op, noise_type=None)
        ds.get_ray_trafo = get_reco_ray_trafo
        ds.ray_trafo = reco_ray_trafo
        return ds

    def _build_forward_op(self, upscale_shape, impl, num_angles):
        reco_space = self.space
        if self.inner_circle:
            space = odl.uniform_discr(min_pt=reco_space.min_pt, max_pt=reco_space.max_pt,
                                      shape=(upscale_shape, upscale_shape), dtype=np.float32)
            min_pt = reco_space.min_pt
            max_pt = reco_space.max_pt
            proj_space = odl.uniform_discr(min_pt, max_pt,
                                           2 * (2 * int(reco_space.max_pt[0]) - 1,),
                                           dtype=np.float32)
            detector_length = get_detector_length(proj_space)
            det_partition = odl.uniform_partition(-np.sqrt((reco_space.shape[0] / 2.0) ** 2 / 2),
                                                  np.sqrt((reco_space.shape[0] / 2.0) ** 2 / 2),
                                                  detector_length)
        else:
            space = odl.uniform_discr(min_pt=reco_space.min_pt, max_pt=reco_space.max_pt,
                                      shape=(upscale_shape, upscale_shape), dtype=np.float32)
            min_pt = reco_space.min_pt
            max_pt = reco_space.max_pt
            proj_space = odl.uniform_discr(min_pt, max_pt, 2 * (reco_space.shape[0],), dtype=np.float32)
            detector_length = get_detector_length(proj_space)
            det_partition = odl.uniform_partition(-reco_space.shape[0] / 2.0,
                                                  reco_space.shape[0] / 2.0,
                                                  detector_length)
        angle_partition = odl.uniform_partition(0, np.pi, num_angles)
        reco_geometry = odl.tomo.Parallel2dGeometry(angle_partition, det_partition)
        ray_trafo = odl.tomo.RayTransform(space, reco_geometry, impl=impl)

        def get_reco_ray_trafo(**kwargs):
            return odl.tomo.RayTransform(reco_space, reco_geometry, **kwargs)

        reco_ray_trafo = get_reco_ray_trafo(impl=impl)

        class _ResizeOperator(odl.Operator):
            def __init__(self):
                super().__init__(reco_space, space)

            def _call(self, x, out, **kwargs):
                out.assign(space.element(resize(x, (upscale_shape, upscale_shape), order=1)))

        resize_op = _ResizeOperator()
        forward_op = ray_trafo * resize_op
        return forward_op, get_reco_ray_trafo, reco_ray_trafo

    def generator(self, part='train'):
        if part == 'train':
            gen = self._train_generator()
        elif part == 'validation':
            gen = self._val_generator()
        elif part == 'test':
            gen = self._test_generator()
        else:
            raise NotImplementedError
        for gt in gen:
            yield gt

    def _train_generator(self):
        for i in range(self.train_len):
            yield self.train_gt_images[i].type(torch.float32)

    def _test_generator(self):
        for i in range(self.test_len):
            yield self.test_gt_images[i].type(torch.float32)

    def _val_generator(self):
        for i in range(self.validation_len):
            yield self.val_gt_images[i].type(torch.float32)

    def get_sample(self, index, part='train', out=None):
        if out is None:
            if part == 'train':
                return self.train_gt_images[index].type(torch.float32)
            elif part == 'validation':
                return self.val_gt_images[index].type(torch.float32)
            elif part == 'test':
                return self.test_gt_images[index].type(torch.float32)
            else:
                raise NotImplementedError
        elif part == 'train':
            out = self.train_gt_images[index].type(torch.float32)
        elif part == 'validation':
            out = self.val_gt_images[index].type(torch.float32)
        elif part == 'test':
            out = self.test_gt_images[index].type(torch.float32)
        else:
            raise NotImplementedError
class SResFourierCoefficientDataset(Dataset):
    def __init__(self, ds, amp_min, amp_max):
        self.ds = ds
        if amp_min is None and amp_max is None:
            tmp_imgs = []
            for i in np.random.permutation(len(self.ds))[:200]:
                img = self.ds[i]
                tmp_imgs.append(img)
            tmp_imgs = torch.stack(tmp_imgs)
            tmp_ffts = torch.fft.rfftn(tmp_imgs, dim=[1, 2])
            log_amps = log_amplitudes(tmp_ffts.abs())
            self.amp_min = log_amps.min()
            self.amp_max = log_amps.max()
        else:
            self.amp_min = amp_min
            self.amp_max = amp_max

    def __getitem__(self, item):
        img = self.ds[item]
        img_fft = torch.fft.rfftn(img, dim=[0, 1])
        img_amp, img_phi = normalize_FC(img_fft, amp_min=self.amp_min, amp_max=self.amp_max)
        img_fft = torch.stack([img_amp.flatten(), img_phi.flatten()], dim=-1)
        return img_fft, (self.amp_min.unsqueeze(-1), self.amp_max.unsqueeze(-1))

    def __len__(self):
        return len(self.ds)
class TRecFourierCoefficientDataset(Dataset):
    def __init__(self, ds, angles, mag_min, mag_max, img_shape=42, inner_circle=True):
        self.ds = ds
        self.img_shape = img_shape
        self.inner_circle = inner_circle
        self.angles = angles
        if mag_min is None and mag_max is None:
            tmp_sinos = []
            for i in np.random.permutation(len(self.ds))[:200]:
                sino, _ = self.ds[i]
                tmp_sinos.append(sino)
            tmp_sinos = torch.stack(tmp_sinos)
            tmp_ffts = torch.fft.rfftn(tmp_sinos, dim=[1, 2])
            tmp_amps = log_amplitudes(tmp_ffts.abs())
            self.amp_min = tmp_amps.min()
            self.amp_max = tmp_amps.max()
        else:
            self.amp_min = mag_min
            self.amp_max = mag_max

    def __getitem__(self, item):
        sino, img = self.ds[item]
        fbp = torch.from_numpy(np.array(
            iradon(sino.numpy().T, theta=np.rad2deg(-self.angles),
                   circle=self.inner_circle, output_size=self.img_shape).astype(np.float32).T))
        sino_fft = torch.fft.rfftn(torch.roll(sino, sino.shape[1] // 2 + 1, 1), dim=[-1])
        fbp_fft = torch.fft.rfftn(torch.roll(fbp, 2 * (img.shape[0] // 2 + 1,), (0, 1)), dim=[0, 1])
        img_fft = torch.fft.rfftn(torch.roll(img, 2 * (img.shape[0] // 2 + 1,), (0, 1)), dim=[0, 1])
        sino_amp, sino_phi = normalize_FC(sino_fft, amp_min=self.amp_min, amp_max=self.amp_max)
        fbp_amp, fbp_phi = normalize_FC(fbp_fft, amp_min=self.amp_min, amp_max=self.amp_max)
        img_amp, img_phi = normalize_FC(img_fft, amp_min=self.amp_min, amp_max=self.amp_max)
        sino_fc = torch.stack([sino_amp.flatten(), sino_phi.flatten()], dim=-1)
        fbp_fc = torch.stack([fbp_amp.flatten(), fbp_phi.flatten()], dim=-1)
        img_fc = torch.stack([img_amp.flatten(), img_phi.flatten()], dim=-1)
        return (sino_fc, fbp_fc, img_fc, img,
                (self.amp_min.unsqueeze(-1), self.amp_max.unsqueeze(-1)))

    def __len__(self):
        return len(self.ds)
def _fc_prod_loss(pred_fc, target_fc, amp_min, amp_max):
    pred_amp = denormalize_amp(pred_fc[..., 0], amp_min=amp_min, amp_max=amp_max)
    target_amp = denormalize_amp(target_fc[..., 0], amp_min=amp_min, amp_max=amp_max)

    pred_phi = denormalize_phi(pred_fc[..., 1])
    target_phi = denormalize_phi(target_fc[..., 1])

    amp_loss = 1 + torch.pow(pred_amp - target_amp, 2)
    phi_loss = 2 - torch.cos(pred_phi - target_phi)
    return torch.mean(amp_loss * phi_loss), torch.mean(amp_loss), torch.mean(phi_loss)
def _fc_sum_loss(pred_fc, target_fc, amp_min, amp_max):
    pred_amp = denormalize_amp(pred_fc[..., 0], amp_min=amp_min, amp_max=amp_max)
    target_amp = denormalize_amp(target_fc[..., 0], amp_min=amp_min, amp_max=amp_max)

    pred_phi = denormalize_phi(pred_fc[..., 1])
    target_phi = denormalize_phi(target_fc[..., 1])

    amp_loss = torch.pow(pred_amp - target_amp, 2)
    phi_loss = 1 - torch.cos(pred_phi - target_phi)
    return torch.mean(amp_loss + phi_loss), torch.mean(amp_loss), torch.mean(phi_loss)
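# Hedged sanity check for the two Fourier-coefficient losses, assuming the
# denormalize_amp/denormalize_phi helpers from this codebase: for a perfect
# prediction the sum form vanishes, while the product form bottoms out at
# (1 + 0) * (2 - 1) = 1 by construction.
import torch

fc = torch.rand(4, 10, 2)  # (..., [amplitude, phase]) layout as in the losses above
amp_min, amp_max = torch.tensor(-5.0), torch.tensor(5.0)
print(_fc_sum_loss(fc, fc, amp_min, amp_max)[0].item())   # 0.0
print(_fc_prod_loss(fc, fc, amp_min, amp_max)[0].item())  # 1.0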
class SResTransformerTrain(torch.nn.Module):
    def __init__(self, d_model, coords, flatten_order, attention_type='linear',
                 n_layers=4, n_heads=4, d_query=32, dropout=0.1, attention_dropout=0.1):
        super(SResTransformerTrain, self).__init__()
        self.fourier_coefficient_embedding = torch.nn.Linear(2, d_model // 2)
        self.pos_embedding = PositionalEncoding2D(d_model // 2, coords=coords,
                                                  flatten_order=flatten_order, persistent=False)
        self.encoder = TransformerEncoderBuilder.from_kwargs(
            attention_type=attention_type,
            n_layers=n_layers,
            n_heads=n_heads,
            feed_forward_dimensions=n_heads * d_query * 4,
            query_dimensions=d_query,
            value_dimensions=d_query,
            dropout=dropout,
            attention_dropout=attention_dropout
        ).get()
        self.predictor_amp = torch.nn.Linear(n_heads * d_query, 1)
        self.predictor_phase = torch.nn.Linear(n_heads * d_query, 1)

    def forward(self, x):
        x = self.fourier_coefficient_embedding(x)
        x = self.pos_embedding(x)
        triangular_mask = TriangularCausalMask(x.shape[1], device=x.device)
        y_hat = self.encoder(x, attn_mask=triangular_mask)
        y_amp = self.predictor_amp(y_hat)
        y_phase = torch.tanh(self.predictor_phase(y_hat))
        return torch.cat([y_amp, y_phase], dim=-1)
class SResTransformerPredict(torch.nn.Module):
    def __init__(self, d_model, coords, flatten_order, attention_type='full',
                 n_layers=4, n_heads=4, d_query=32, dropout=0.1, attention_dropout=0.1):
        super(SResTransformerPredict, self).__init__()
        self.fourier_coefficient_embedding = torch.nn.Linear(2, d_model // 2)
        self.pos_embedding = PositionalEncoding2D(d_model // 2, coords=coords,
                                                  flatten_order=flatten_order, persistent=False)
        self.encoder = RecurrentEncoderBuilder.from_kwargs(
            attention_type=attention_type,
            n_layers=n_layers,
            n_heads=n_heads,
            feed_forward_dimensions=n_heads * d_query * 4,
            query_dimensions=d_query,
            value_dimensions=d_query,
            dropout=dropout,
            attention_dropout=attention_dropout
        ).get()
        self.predictor_amp = torch.nn.Linear(n_heads * d_query, 1)
        self.predictor_phase = torch.nn.Linear(n_heads * d_query, 1)

    def forward(self, x, i=0, memory=None):
        x = x.view(x.shape[0], -1)
        x = self.fourier_coefficient_embedding(x)
        x = self.pos_embedding.forward_i(x, i)
        y_hat, memory = self.encoder(x, memory)
        y_amp = self.predictor_amp(y_hat)
        y_phase = torch.tanh(self.predictor_phase(y_hat))
        return torch.cat([y_amp, y_phase], dim=-1), memory
class RAdam(Optimizer):
    def __init__(self, params, lr=0.001, max_grad_norm=1.0, betas=(0.9, 0.999),
                 eps=1e-08, weight_decay=0, degenerated_to_sgd=True):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))

        self.degenerated_to_sgd = degenerated_to_sgd
        if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
            for param in params:
                if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
                    param['buffer'] = [[None, None, None] for _ in range(10)]
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm,
                        buffer=[[None, None, None] for _ in range(10)])
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def set_lr(self, lr):
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if not state:
                    return
                group['lr'] = lr
                return

    def get_lr(self):
        """get learning rate in training"""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if not state:
                    return [0]
                lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')

                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # Keyword form of the deprecated positional add_/addcmul_ overloads
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state['step'] += 1
                buffered = group['buffer'][int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    if N_sma >= 5:
                        step_size = math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) *
                            (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)
                        ) / (1 - beta1 ** state['step'])
                    elif self.degenerated_to_sgd:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    else:
                        step_size = -1
                    buffered[2] = step_size

                if N_sma >= 5:
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
                    p.data.copy_(p_data_fp32)
                elif step_size > 0:
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                    p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
                    p.data.copy_(p_data_fp32)
        return loss
class PlainRAdam(Optimizer):
    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08,
                 weight_decay=0, degenerated_to_sgd=True):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))

        self.degenerated_to_sgd = degenerated_to_sgd
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(PlainRAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(PlainRAdam, self).__setstate__(state)

    def step(self, closure=None):
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')

                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state['step'] += 1
                beta2_t = beta2 ** state['step']
                N_sma_max = 2 / (1 - beta2) - 1
                N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)

                if N_sma >= 5:
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                    step_size = group['lr'] * math.sqrt(
                        (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) *
                        (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)
                    ) / (1 - beta1 ** state['step'])
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                    p.data.copy_(p_data_fp32)
                elif self.degenerated_to_sgd:
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                    step_size = group['lr'] / (1 - beta1 ** state['step'])
                    p_data_fp32.add_(exp_avg, alpha=-step_size)
                    p.data.copy_(p_data_fp32)
        return loss
class AdamW(Optimizer):
    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08,
                 weight_decay=0, warmup=0):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))

        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, warmup=warmup)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)

    def step(self, closure=None):
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')

                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1

                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                if group['warmup'] > state['step']:
                    scheduled_lr = 1e-08 + state['step'] * group['lr'] / group['warmup']
                else:
                    scheduled_lr = group['lr']
                step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1

                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * scheduled_lr)
                p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                p.data.copy_(p_data_fp32)
        return loss
class TestTomoUtils(unittest.TestCase):
    def setUp(self) -> None:
        self.img_shape = 27
        self.angles = np.array([0, np.pi / 2, np.pi])

    def test_cartesian_rfft_2D(self):
        x, y, flatten_indices, order = get_cartesian_rfft_coords_2D(self.img_shape)
        x_ordered = torch.zeros_like(x)
        x_ordered[flatten_indices] = x
        x_ordered = x_ordered.reshape(self.img_shape, -1)
        y_ordered = torch.zeros_like(y)
        y_ordered[flatten_indices] = y
        y_ordered = y_ordered.reshape(self.img_shape, -1)
        y_ordered = torch.roll(y_ordered, -(self.img_shape // 2 + 1), 0)
        y_target, x_target = torch.meshgrid(torch.arange(self.img_shape),
                                            torch.arange(self.img_shape // 2 + 1))
        self.assertEqual(order[0, 0], 0, 'Top left pixel should have index 0.')
        self.assertTrue(torch.all(x_target == x_ordered) and torch.all(y_target == y_ordered),
                        'rFFT coordinates are wrong.')

    def test_polar_rfft_2D(self):
        r, phi, flatten_indices, order = get_polar_rfft_coords_2D(img_shape=self.img_shape)
        self.assertEqual(order[0, 0], 0, 'Top left pixel should have index 0.')
        r_ordered = torch.zeros_like(r)
        r_ordered[flatten_indices] = r
        r_ordered = r_ordered.reshape(self.img_shape, -1)
        self.assertEqual(r_ordered[0, 0], 0, 'Top left pixel does not have radius 0.')
        phi_ordered = torch.zeros_like(phi)
        phi_ordered[flatten_indices] = phi
        phi_ordered = phi_ordered.reshape(self.img_shape, -1)
        self.assertEqual(phi_ordered[0, 0], 0, 'Top left pixel angle does not correspond to 0.')
        self.assertEqual(phi_ordered[self.img_shape // 2, 0], np.pi / 2,
                         'Phi component is off (test 1).')
        self.assertEqual(phi_ordered[self.img_shape - 1, 0], -np.pi / 2,
                         'Phi component is off (test 2).')

    def test_polar_sinogram(self):
        r, phi, flatten_indices = get_polar_rfft_coords_sinogram(self.angles, self.img_shape)
        self.assertTrue(torch.all((r[0::3] == r[1::3]) == (r[1::3] == r[2::3])),
                        'Radii of polar sinogram coords are off.')
        phi_ordered = torch.zeros_like(phi)
        phi_ordered[flatten_indices] = phi
        self.assertTrue(torch.all(phi_ordered[:self.img_shape // 2 + 1] == np.pi / 2.0),
                        'Phi of polar sinogram coords are off (test 1).')
        self.assertTrue(torch.all(phi_ordered[self.img_shape // 2 + 1:-(self.img_shape // 2 + 1)] == 0),
                        'Phi of polar sinogram coords are off (test 2).')
        self.assertTrue(torch.all(phi_ordered[-(self.img_shape // 2 + 1):] == -np.pi / 2.0),
                        'Phi of polar sinogram coords are off (test 3).')

    def test_cartesian_sinogram(self):
        x, y, flatten_indices = get_cartesian_rfft_coords_sinogram(self.angles, self.img_shape)
        self.assertTrue(torch.all(x <= self.img_shape // 2 + 1))
        self.assertTrue(torch.all(x >= 0))
        self.assertTrue(torch.all(y <= self.img_shape))
        self.assertTrue(torch.all(y >= 0))
class TestUtils(unittest.TestCase):
    def test_cart2pol2cart(self):
        x = torch.arange(1, 6, dtype=torch.float32)
        y = torch.arange(-2, 3, dtype=torch.float32)
        r, phi = cart2pol(x, y)
        x_, y_ = pol2cart(r, phi)
        self.assertTrue(torch.allclose(x, x_) and torch.allclose(y, y_),
                        'Cartesian to polar coordinate transformations are broken.')

    def test_normalize_denormalize_realspace(self):
        data = torch.from_numpy(np.array([-1, 2, 4, 0, -5], dtype=np.float32))
        mean = torch.mean(data)
        std = torch.std(data)
        data_n = normalize(data, mean, std)
        self.assertAlmostEqual(torch.mean(data_n).item(), 0, 7)
        self.assertAlmostEqual(torch.std(data_n).item(), 1, 7)
        data_dn = denormalize(data_n, mean, std)
        self.assertTrue(torch.allclose(data, data_dn))

    def test_normalize_denormalize_amplitudes(self):
        amps = torch.exp(torch.arange(6, dtype=torch.float32))
        log_amps = log_amplitudes(amps)
        min_amp = log_amps.min()
        max_amp = log_amps.max()
        n_amps = normalize_amp(amps, amp_min=min_amp, amp_max=max_amp)
        amps_ = denormalize_amp(n_amps, amp_min=min_amp, amp_max=max_amp)
        self.assertTrue(torch.allclose(amps, amps_))

    def test_normalize_denormalize_phases(self):
        phases = torch.linspace(-np.pi, np.pi, 10)
        phases_n = normalize_phi(phases)
        phases_ = denormalize_phi(phases_n)
        self.assertTrue(torch.allclose(phases, phases_))

    def test_normalize_denormalize_FC(self):
        img = psf_real(7, 27)
        rfft = torch.fft.rfftn(img)
        log_amps = log_amplitudes(rfft.abs())
        min_amp = log_amps.min()
        max_amp = log_amps.max()
        amp_n, phi_n = normalize_FC(rfft, amp_min=min_amp, amp_max=max_amp)
        fc_n = torch.stack([amp_n, phi_n], -1)
        rfft_ = denormalize_FC(fc_n, amp_min=min_amp, amp_max=max_amp)
        self.assertTrue(torch.allclose(rfft, rfft_))

    def test_convert2DFT(self):
        img = psf_real(7, 27)
        rfft = torch.fft.rfftn(img)
        log_amps = log_amplitudes(rfft.abs())
        min_amp = log_amps.min()
        max_amp = log_amps.max()
        order = torch.from_numpy(np.random.permutation(27 * 14))
        amp_n, phi_n = normalize_FC(rfft, amp_min=min_amp, amp_max=max_amp)
        fc_n = torch.stack([amp_n.flatten(), phi_n.flatten()], dim=-1)[order]
        dft = convert2DFT(fc_n.unsqueeze(0), amp_min=min_amp, amp_max=max_amp,
                          dst_flatten_order=order, img_shape=27)
        img_ = torch.fft.irfftn(dft, s=(27, 27))
        self.assertTrue(torch.allclose(img, img_))
def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
    return filename.lower().endswith(extensions)
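# Example: matching is case-insensitive and accepts a tuple of suffixes.
print(has_file_allowed_extension('photo.JPG', ('.jpg', '.png')))  # True
print(has_file_allowed_extension('notes.txt', ('.jpg', '.png')))  # False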
def make_subsampled_dataset(directory, class_to_idx, extensions=None, is_valid_file=None,
                            sampling_ratio=1.0, nb_classes=None):
    instances = []
    directory = os.path.expanduser(directory)
    both_none = extensions is None and is_valid_file is None
    both_something = extensions is not None and is_valid_file is not None
    if both_none or both_something:
        raise ValueError('Both extensions and is_valid_file cannot be None or not None at the same time')
    if extensions is not None:
        def is_valid_file(x: str) -> bool:
            return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
    is_valid_file = cast(Callable[[str], bool], is_valid_file)

    for i, target_class in enumerate(sorted(class_to_idx.keys())):
        if nb_classes is not None and i >= nb_classes:
            break
        class_index = class_to_idx[target_class]
        target_dir = os.path.join(directory, target_class)
        if not os.path.isdir(target_dir):
            continue
        num_imgs = int(len(os.listdir(target_dir)) * sampling_ratio)
        imgs = 0
        for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
            for fname in sorted(fnames):
                if imgs == num_imgs:
                    break
                path = os.path.join(root, fname)
                if is_valid_file(path):
                    item = (path, class_index)
                    instances.append(item)
                    imgs += 1
    return instances
class INatDataset(ImageFolder):
    def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
                 category='name', loader=default_loader):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        self.year = year
        path_json = os.path.join(root, f"{'train' if train else 'val'}{year}.json")
        with open(path_json) as json_file:
            data = json.load(json_file)
        with open(os.path.join(root, 'categories.json')) as json_file:
            data_catg = json.load(json_file)
        path_json_for_targeter = os.path.join(root, f'train{year}.json')
        with open(path_json_for_targeter) as json_file:
            data_for_targeter = json.load(json_file)

        targeter = {}
        indexer = 0
        for elem in data_for_targeter['annotations']:
            king = []
            king.append(data_catg[int(elem['category_id'])][category])
            if king[0] not in targeter.keys():
                targeter[king[0]] = indexer
                indexer += 1
        self.nb_classes = len(targeter)

        self.samples = []
        for elem in data['images']:
            cut = elem['file_name'].split('/')
            target_current = int(cut[2])
            path_current = os.path.join(root, cut[0], cut[2], cut[3])
            categors = data_catg[target_current]
            target_current_true = targeter[categors[category]]
            self.samples.append((path_current, target_current_true))
class SubsampledDatasetFolder(DatasetFolder):
    def __init__(self, root, loader, extensions=None, transform=None, target_transform=None,
                 is_valid_file=None, sampling_ratio=1.0, nb_classes=None):
        super(DatasetFolder, self).__init__(root, transform=transform,
                                            target_transform=target_transform)
        classes, class_to_idx = self._find_classes(self.root)
        samples = make_subsampled_dataset(self.root, class_to_idx, extensions, is_valid_file,
                                          sampling_ratio=sampling_ratio, nb_classes=nb_classes)
        if len(samples) == 0:
            msg = 'Found 0 files in subfolders of: {}\n'.format(self.root)
            if extensions is not None:
                msg += 'Supported extensions are: {}'.format(','.join(extensions))
            raise RuntimeError(msg)

        self.loader = loader
        self.extensions = extensions
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.targets = [s[1] for s in samples]
class ImageNetDataset(SubsampledDatasetFolder):
    def __init__(self, root, loader=default_loader, is_valid_file=None, **kwargs):
        super(ImageNetDataset, self).__init__(root, loader,
                                              IMG_EXTENSIONS if is_valid_file is None else None,
                                              is_valid_file=is_valid_file, **kwargs)
        self.imgs = self.samples
def build_dataset(is_train, args):
    transform = build_transform(is_train, args)

    if args.data_set == 'CIFAR10':
        dataset = datasets.CIFAR10(args.data_path, train=is_train, transform=transform, download=True)
        nb_classes = 10
    elif args.data_set == 'CIFAR100':
        dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform, download=True)
        nb_classes = 100
    elif args.data_set == 'IMNET':
        root = os.path.join(args.data_path, 'train' if is_train else 'val')
        dataset = ImageNetDataset(root, transform=transform,
                                  sampling_ratio=(args.sampling_ratio if is_train else 1.0),
                                  nb_classes=args.nb_classes)
        nb_classes = args.nb_classes if args.nb_classes is not None else 1000
    elif args.data_set == 'INAT':
        dataset = INatDataset(args.data_path, train=is_train, year=2018,
                              category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    elif args.data_set == 'INAT19':
        args.data_path = '/datasets01/inaturalist/090619/'
        dataset = INatDataset(args.data_path, train=is_train, year=2019,
                              category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes

    return dataset, nb_classes
def build_transform(is_train, args):
    resize_im = args.input_size > 32
    if is_train:
        transform = create_transform(input_size=args.input_size, is_training=True,
                                     color_jitter=args.color_jitter, auto_augment=args.aa,
                                     interpolation=args.train_interpolation,
                                     re_prob=args.reprob, re_mode=args.remode,
                                     re_count=args.recount)
        if not resize_im:
            transform.transforms[0] = transforms.RandomCrop(args.input_size, padding=4)
        return transform

    t = []
    if resize_im:
        size = int((256 / 224) * args.input_size)
        t.append(transforms.Resize(size, interpolation=3))
        t.append(transforms.CenterCrop(args.input_size))
    t.append(transforms.ToTensor())
    t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
    return transforms.Compose(t)
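# Hedged usage sketch of build_transform for evaluation; the args namespace
# below is a made-up stand-in carrying only the field the eval branch reads.
from types import SimpleNamespace

args = SimpleNamespace(input_size=224)
eval_transform = build_transform(is_train=False, args=args)
print(eval_transform)  # Resize(256) -> CenterCrop(224) -> ToTensor -> Normalize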
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
                    model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None):
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10

    per_batch_time = []
    for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
        s = time.time()
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        if mixup_fn is not None:
            samples, targets = mixup_fn(samples, targets)

        with torch.cuda.amp.autocast():
            outputs = model(samples)
            if (outputs != outputs).nonzero().size(0) > 0:
                print('output is NaN')
                exit()
            loss = criterion(outputs, targets)

        loss_value = loss.item()
        if not math.isfinite(loss_value):
            print('Loss is {}, stopping training'.format(loss_value))
            sys.exit(1)

        optimizer.zero_grad()
        is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
        loss_scaler(loss, optimizer, clip_grad=max_norm,
                    parameters=model.parameters(), create_graph=is_second_order)

        torch.cuda.synchronize()
        if model_ema is not None:
            model_ema.update(model)

        metric_logger.update(loss=loss_value)
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
        per_batch_time.append(time.time() - s)

    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Test:'

    model.eval()
    for images, target in metric_logger.log_every(data_loader, 10, header):
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)

        with torch.cuda.amp.autocast():
            output = model(images)
            loss = criterion(output, target)

        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        batch_size = images.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)

    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
def get_args_parser(): parser = argparse.ArgumentParser('PerViT training and evaluation script', add_help=False) parser.add_argument('--batch-size', default=256, type=int) parser.add_argument('--epochs', default=300, type=int) parser.add_argument('--model', default='pervit_tiny', type=str, metavar='MODEL', help='Name of model to train') parser.add_argument('--load', type=str, default='') parser.add_argument('--pretrained', action='store_true') parser.add_argument('--input-size', default=224, type=int, help='images input size') parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)') parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', help='Drop block rate (default: None)') parser.add_argument('--model-ema', action='store_true') parser.add_argument('--no-model-ema', action='store_false', dest='model_ema') parser.set_defaults(model_ema=False) parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='') parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='') parser.add_argument('--use_pos_embed', action='store_true', help='Absolute positional embedding') parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw")') parser.add_argument('--opt-eps', default=1e-08, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)') parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)') parser.add_argument('--weight-decay', type=float, default=0.05, help='weight decay (default: 0.05)') parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER', help='LR scheduler (default: "cosine")') parser.add_argument('--lr', type=float, default=0.0005, metavar='LR', help='learning rate (default: 5e-4)') parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', help='learning rate noise on/off epoch percentages') parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', help='learning rate noise limit percent (default: 0.67)') parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', help='learning rate noise std-dev (default: 1.0)') parser.add_argument('--warmup-lr', type=float, default=1e-06, metavar='LR', help='warmup learning rate (default: 1e-6)') parser.add_argument('--min-lr', type=float, default=1e-05, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (default: 1e-5)') parser.add_argument('--decay-epochs', type=float, default=30, metavar='N', help='epoch interval to decay LR') parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports') parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', help='epochs to cooldown LR at min_lr, after cyclic schedule ends') parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10)') parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)') parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)') parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)') parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') parser.add_argument('--train-interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic; default: "bicubic")') parser.add_argument('--repeated-aug', action='store_true') parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug') parser.set_defaults(repeated_aug=True) parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)') parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') parser.add_argument('--mixup', type=float, default=0.8, help='mixup alpha, mixup enabled if > 0. (default: 0.8)') parser.add_argument('--cutmix', type=float, default=1.0, help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)') parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') parser.add_argument('--mixup-prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled') parser.add_argument('--mixup-switch-prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') parser.add_argument('--mixup-mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"') parser.add_argument('--data-path', default='../Datasets_CLS/ILSVRC2012/', type=str, help='dataset path') parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100', 'IMNET', 'INAT', 'INAT19'], type=str, help='dataset name') parser.add_argument('--sampling_ratio', default=1.0, type=float, help='fraction of samples to keep in the training set of imagenet') parser.add_argument('--nb_classes', default=1000, type=int, help='number of classes in imagenet') parser.add_argument('--inat-category', default='name', choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'], type=str, help='semantic granularity') parser.add_argument('--output_dir', default='logs/test/', help='path where to save, empty for no saving') parser.add_argument('--device', default='cuda', help='device to use for training / testing') parser.add_argument('--seed', default=0, type=int) parser.add_argument('--resume', default='yes', help='resume from checkpoint') parser.add_argument('--save_every', default=1, type=int, help='save a checkpoint every N epochs') parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch') parser.add_argument('--eval', action='store_true', help='Perform evaluation only') parser.add_argument('--num_workers', default=10, type=int) parser.add_argument('--pin-mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem', help='') parser.set_defaults(pin_mem=True) parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') parser.add_argument('--visualize', action='store_true', help='Visualize learned attentions') return parser
def main(args): utils.init_distributed_mode(args) if utils.is_main_process(): tbd_writer = SummaryWriter(os.path.join(args.output_dir, 'tbd/runs')) print(args) device = torch.device(args.device) seed = (args.seed + utils.get_rank()) print('seed: ', seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) cudnn.benchmark = True if (not args.eval): (dataset_train, args.nb_classes) = build_dataset(is_train=True, args=args) num_tasks = utils.get_world_size() global_rank = utils.get_rank() if args.repeated_aug: sampler_train = RASampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True) else: sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True) data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True) (dataset_val, _) = build_dataset(is_train=False, args=args) data_loader_val = torch.utils.data.DataLoader(dataset_val, batch_size=int((1.5 * args.batch_size)), shuffle=False, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False) mixup_fn = None mixup_active = ((args.mixup > 0) or (args.cutmix > 0.0) or (args.cutmix_minmax is not None)) if mixup_active: mixup_fn = Mixup(mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes) print(f'Creating model: {args.model}') model = create_model(args.model, pretrained=args.pretrained, num_classes=args.nb_classes, drop_rate=args.drop, drop_path_rate=args.drop_path, drop_block_rate=args.drop_block, use_pos_embed=args.use_pos_embed) print(model) model.to(device) if args.pretrained: checkpoint = torch.load(args.load) model.load_state_dict(checkpoint['model']) model.init_rpe() with torch.cuda.device(0): (macs, params) = get_model_complexity_info(model, (3, 224, 224), as_strings=True, print_per_layer_stat=True, verbose=True) print('{:<30} {:<8}'.format('Computational complexity: ', macs)) print('{:<30} {:<8}'.format('Number of parameters: ', params)) if args.visualize: basepath = ('vis/%s' % args.load.split('/')[(- 2)]) os.makedirs(('%s/weight' % basepath), exist_ok=True) vis_attention(model, basepath, side=14, q=(7, 7)) print(('Attention visualized at %s' % basepath)) model_ema = None if args.model_ema: model_ema = ModelEma(model, decay=args.model_ema_decay, device=('cpu' if args.model_ema_force_cpu else ''), resume='') model_without_ddp = model if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) model_without_ddp = model.module n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad)) print('number of params:', n_parameters) linear_scaled_lr = (((args.lr * args.batch_size) * utils.get_world_size()) / 512.0) args.lr = linear_scaled_lr optimizer = create_optimizer(args, model) loss_scaler = NativeScaler() (lr_scheduler, _) = create_scheduler(args, optimizer) criterion = LabelSmoothingCrossEntropy() if (args.mixup > 0.0): criterion = SoftTargetCrossEntropy() elif args.smoothing: criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing) else: criterion = torch.nn.CrossEntropyLoss() output_dir = Path(args.output_dir) if utils.is_main_process(): torch.save(args, (output_dir / 'args.pyT')) if (args.resume and utils.is_main_process()): if str(args.resume).startswith('https'): shutil.rmtree(('%s/tbd/' % args.output_dir)) os.remove(('%s/args.pyT' % args.output_dir)) file_id = args.resume[(args.resume.index('/') + 2):] utils.download_from_google_drive(file_id, args.output_dir) resume_path = os.path.join(args.output_dir, 'checkpoint_latest.pth') latest_exist = os.path.exists(resume_path) if latest_exist: checkpoint = torch.load(resume_path, map_location='cpu') if (latest_exist and (not args.eval)): model_without_ddp.load_state_dict(checkpoint['model']) if ((not args.eval) and ('optimizer' in checkpoint) and ('lr_scheduler' in checkpoint) and ('epoch' in checkpoint)): optimizer.load_state_dict(checkpoint['optimizer']) lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) args.start_epoch = (checkpoint['epoch'] + 1) if args.model_ema: utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema']) if args.eval: throughput = utils.compute_throughput(model, resolution=args.input_size) print(f'Throughput : {throughput:.2f}') model.initialize() test_stats = evaluate(data_loader_val, model, device) print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%") return print('Start training') start_time = time.time() max_accuracy = 0.0 for epoch in range(args.start_epoch, args.epochs): gc.collect() if args.distributed: data_loader_train.sampler.set_epoch(epoch) train_stats = train_one_epoch(model, criterion, data_loader_train, optimizer, device, epoch, loss_scaler, args.clip_grad, model_ema, mixup_fn) lr_scheduler.step(epoch) if args.output_dir: checkpoint_paths = [(output_dir / 'checkpoint.pth')] if (args.save_every is not None): if ((epoch % args.save_every) == 0): checkpoint_paths.append((output_dir / 'checkpoint_latest.pth')) for checkpoint_path in checkpoint_paths: utils.save_on_master({'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'epoch': epoch, 'model_ema': (get_state_dict(model_ema) if model_ema else None), 'args': args}, checkpoint_path) test_stats = evaluate(data_loader_val, model, device) print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%") max_accuracy = max(max_accuracy, test_stats['acc1']) print(f'Max accuracy: {max_accuracy:.2f}%') if utils.is_main_process(): tbd_writer.add_scalars('data/loss', {'trn_loss': train_stats['loss'], 'test_loss': test_stats['loss']}, (epoch + 1)) tbd_writer.add_scalars('data/acc1', {'test_acc1': test_stats['acc1']}, (epoch + 1)) tbd_writer.add_scalars('data/acc5', {'test_acc5': test_stats['acc5']}, (epoch + 1)) tbd_writer.add_scalars('data/lr', {'lr': train_stats['lr']}, (epoch + 1)) tbd_writer.flush() if utils.is_main_process(): total_time = (time.time() - start_time) total_time_str = str(datetime.timedelta(seconds=int(total_time))) tbd_writer.close() print('Training time {}'.format(total_time_str))
@register_model def pervit_tiny(pretrained=False, **kwargs): num_heads = 4 kwargs['emb_dims'] = [128, 192, 224, 280] kwargs['convstem_dims'] = [3, 48, 64, 96, 128] model = VisionTransformer(num_heads=num_heads, norm_layer=partial(nn.LayerNorm, eps=1e-06), **kwargs) model.default_cfg = _cfg() return model
@register_model def pervit_small(pretrained=False, **kwargs): num_heads = 8 kwargs['emb_dims'] = [272, 320, 368, 464] kwargs['convstem_dims'] = [3, 64, 128, 192, 272] model = VisionTransformer(num_heads=num_heads, norm_layer=partial(nn.LayerNorm, eps=1e-06), **kwargs) model.default_cfg = _cfg() return model
@register_model def pervit_medium(pretrained=False, **kwargs): num_heads = 12 kwargs['emb_dims'] = [312, 468, 540, 684] kwargs['convstem_dims'] = [3, 64, 192, 256, 312] model = VisionTransformer(num_heads=num_heads, norm_layer=partial(nn.LayerNorm, eps=1e-06), **kwargs) model.default_cfg = _cfg() return model
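Because the three constructors above are decorated with timm's @register_model, main() can build them by name through timm's factory. A minimal sketch (the keyword values mirror the parser defaults; anything else is an assumption):

from timm.models import create_model

# Build the smallest variant by its registered name; extra kwargs are
# forwarded to the VisionTransformer constructor.
model = create_model('pervit_tiny', pretrained=False, num_classes=1000,
                     drop_rate=0.0, drop_path_rate=0.1, drop_block_rate=None,
                     use_pos_embed=False)
print(sum(p.numel() for p in model.parameters() if p.requires_grad))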
class OffsetGenerator(): @classmethod def initialize(cls, n_patch_side, pad_size): grid_1d = torch.linspace((- 1), 1, n_patch_side).to('cuda') if (pad_size > 0): pad_dist = torch.cumsum((grid_1d[(- 1)] - grid_1d[(- 2)]).repeat(pad_size), dim=0) grid_1d = torch.cat([((- 1) - pad_dist).flip(dims=[0]), grid_1d, (1 + pad_dist)]) n_patch_side += (pad_size * 2) n_tokens = (n_patch_side ** 2) grid_y = grid_1d.view((- 1), 1).repeat(1, n_patch_side) grid_x = grid_1d.view(1, (- 1)).repeat(n_patch_side, 1) grid = torch.stack([grid_y, grid_x], dim=(- 1)).view((- 1), 2) grid_q = grid.view((- 1), 1, 2).repeat(1, n_tokens, 1) grid_k = grid.view(1, (- 1), 2).repeat(n_tokens, 1, 1) cls.qk_vec = (grid_k - grid_q) @classmethod def get_qk_vec(cls): return cls.qk_vec.clone()
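OffsetGenerator precomputes, once per run, the 2-D offset between every query/key pair of grid coordinates normalized to [-1, 1]; attention layers can then read the shared table via get_qk_vec(). A shape sketch (note the class pins the grid to 'cuda', so a GPU is assumed):

# For a 14x14 token grid without padding there are 196 tokens,
# so the offset table is (196, 196, 2): vec[q, k] = coord(k) - coord(q).
OffsetGenerator.initialize(n_patch_side=14, pad_size=0)
vec = OffsetGenerator.get_qk_vec()
print(vec.shape)  # torch.Size([196, 196, 2])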
class RASampler(torch.utils.data.Sampler): 'Sampler that restricts data loading to a subset of the dataset for distributed training,\n with repeated augmentation.\n It ensures that each augmented version of a sample is visible to a different process (GPU).\n Heavily based on torch.utils.data.DistributedSampler.\n ' def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): if (num_replicas is None): if (not dist.is_available()): raise RuntimeError('Requires distributed package to be available') num_replicas = dist.get_world_size() if (rank is None): if (not dist.is_available()): raise RuntimeError('Requires distributed package to be available') rank = dist.get_rank() self.dataset = dataset self.num_replicas = num_replicas self.rank = rank self.epoch = 0 self.num_samples = int(math.ceil(((len(self.dataset) * 3.0) / self.num_replicas))) self.total_size = (self.num_samples * self.num_replicas) self.num_selected_samples = int(math.floor((((len(self.dataset) // 256) * 256) / self.num_replicas))) self.shuffle = shuffle def __iter__(self): g = torch.Generator() g.manual_seed(self.epoch) if self.shuffle: indices = torch.randperm(len(self.dataset), generator=g).tolist() else: indices = list(range(len(self.dataset))) indices = [ele for ele in indices for i in range(3)] indices += indices[:(self.total_size - len(indices))] assert (len(indices) == self.total_size) indices = indices[self.rank:self.total_size:self.num_replicas] assert (len(indices) == self.num_samples) return iter(indices[:self.num_selected_samples]) def __len__(self): return self.num_selected_samples def set_epoch(self, epoch): self.epoch = epoch
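A single-process sanity check of the sampler's bookkeeping (num_replicas and rank are passed explicitly so no process group is needed); because each index is repeated three times before sharding, one rank's draw contains duplicates:

import torch

dataset = list(range(1000))  # stand-in dataset
sampler = RASampler(dataset, num_replicas=2, rank=0, shuffle=True)
sampler.set_epoch(0)
indices = list(iter(sampler))
print(len(indices))                      # num_selected_samples == 384 here
print(len(indices) - len(set(indices)))  # > 0: repeated-augmentation duplicates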
class SmoothedValue(object): 'Track a series of values and provide access to smoothed values over a\n window or the global series average.\n ' def __init__(self, window_size=20, fmt=None): if (fmt is None): fmt = '{median:.4f} ({global_avg:.4f})' self.deque = deque(maxlen=window_size) self.total = 0.0 self.count = 0 self.fmt = fmt def update(self, value, n=1): self.deque.append(value) self.count += n self.total += (value * n) def synchronize_between_processes(self): '\n Warning: does not synchronize the deque!\n ' if (not is_dist_avail_and_initialized()): return t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') dist.barrier() dist.all_reduce(t) t = t.tolist() self.count = int(t[0]) self.total = t[1] @property def median(self): d = torch.tensor(list(self.deque)) return d.median().item() @property def avg(self): d = torch.tensor(list(self.deque), dtype=torch.float32) return d.mean().item() @property def global_avg(self): return (self.total / self.count) @property def max(self): return max(self.deque) @property def value(self): return self.deque[(- 1)] def __str__(self): return self.fmt.format(median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value)
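Typical standalone usage; the bounded deque feeds the windowed median/average while total and count feed the global average:

meter = SmoothedValue(window_size=20)
for loss in [0.9, 0.7, 0.65, 0.6]:
    meter.update(loss)  # n=1 per update
print(str(meter))            # '0.6500 (0.7125)': windowed median, global average
print(meter.max, meter.value)  # 0.9 0.6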
class MetricLogger(object): def __init__(self, delimiter='\t'): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter def update(self, **kwargs): for (k, v) in kwargs.items(): if isinstance(v, torch.Tensor): v = v.item() assert isinstance(v, (float, int)) self.meters[k].update(v) def __getattr__(self, attr): if (attr in self.meters): return self.meters[attr] if (attr in self.__dict__): return self.__dict__[attr] raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr)) def __str__(self): loss_str = [] for (name, meter) in self.meters.items(): loss_str.append('{}: {}'.format(name, str(meter))) return self.delimiter.join(loss_str) def synchronize_between_processes(self): for meter in self.meters.values(): meter.synchronize_between_processes() def add_meter(self, name, meter): self.meters[name] = meter def log_every(self, iterable, print_freq, header=None): i = 0 if (not header): header = '' start_time = time.time() end = time.time() iter_time = SmoothedValue(fmt='{avg:.4f}') data_time = SmoothedValue(fmt='{avg:.4f}') space_fmt = ((':' + str(len(str(len(iterable))))) + 'd') log_msg = [header, (('[{0' + space_fmt) + '}/{1}]'), 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}'] if torch.cuda.is_available(): log_msg.append('max mem: {memory:.0f}') log_msg = self.delimiter.join(log_msg) MB = (1024.0 * 1024.0) for obj in iterable: data_time.update((time.time() - end)) (yield obj) iter_time.update((time.time() - end)) if (((i % print_freq) == 0) or (i == (len(iterable) - 1))): eta_seconds = (iter_time.global_avg * (len(iterable) - i)) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) if torch.cuda.is_available(): print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=(torch.cuda.max_memory_allocated() / MB))) else: print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time))) i += 1 end = time.time() total_time = (time.time() - start_time) total_time_str = str(datetime.timedelta(seconds=int(total_time))) print('{} Total time: {} ({:.4f} s / it)'.format(header, total_time_str, (total_time / len(iterable))))
def _load_checkpoint_for_ema(model_ema, checkpoint): '\n Workaround for ModelEma._load_checkpoint to accept an already-loaded object\n ' mem_file = io.BytesIO() torch.save(checkpoint, mem_file) mem_file.seek(0) model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master): '\n This function disables printing when not in master process\n ' import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop('force', False) if (is_master or force): builtin_print(*args, **kwargs) __builtin__.print = print
def is_dist_avail_and_initialized(): if (not dist.is_available()): return False if (not dist.is_initialized()): return False return True
def get_world_size(): if (not is_dist_avail_and_initialized()): return 1 return dist.get_world_size()
def get_rank(): if (not is_dist_avail_and_initialized()): return 0 return dist.get_rank()
def is_main_process(): return (get_rank() == 0)
def save_on_master(*args, **kwargs): if is_main_process(): torch.save(*args, **kwargs)
def init_distributed_mode(args): if (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)): args.rank = int(os.environ['RANK']) args.world_size = int(os.environ['WORLD_SIZE']) args.gpu = int(os.environ['LOCAL_RANK']) elif ('SLURM_PROCID' in os.environ): args.rank = int(os.environ['SLURM_PROCID']) args.gpu = (args.rank % torch.cuda.device_count()) else: print('Not using distributed mode') args.distributed = False return args.distributed = True torch.cuda.set_device(args.gpu) args.dist_backend = 'nccl' print('| distributed init (rank {}): {}'.format(args.rank, args.dist_url), flush=True) torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) torch.distributed.barrier() setup_for_distributed((args.rank == 0))
@torch.no_grad() def compute_throughput(model, batch_size=128, resolution=224): torch.cuda.empty_cache() warmup_iters = 3 num_iters = 30 model.eval() model.to('cuda') timing = [] inputs = torch.randn(batch_size, 3, resolution, resolution, device='cuda') for _ in range(warmup_iters): model(inputs) torch.cuda.synchronize() for _ in range(num_iters): start = time.time() model(inputs) torch.cuda.synchronize() timing.append((time.time() - start)) timing = torch.as_tensor(timing, dtype=torch.float32) return (batch_size / timing.mean())
def download_from_google_drive(file_id, output_dir): url = ('https://drive.google.com/uc?id=%s' % file_id) output = os.path.join(output_dir, 'tmp.tar.gz') gdown.download(url, output, quiet=False) file = tarfile.open(output, 'r:gz') file.extractall(output_dir) file.close() os.remove(output) target_dir = glob.glob(('%s/*' % output_dir))[0] files_to_move = glob.glob(('%s/*' % target_dir)) for f in files_to_move: shutil.move(f, output_dir) os.rmdir(target_dir) print()
def get_data(params, dataset_name, subset=None): load_test = (('train_all' in params) and params['train_all']) test_dataset = None transform = transforms.Compose([transforms.ToTensor()]) if (dataset_name == 'mnist'): dataset = datasets.mnist.MNIST(root=config.DATASETS_DIR, download=True, transform=transform) if load_test: test_dataset = datasets.mnist.MNIST(root=config.DATASETS_DIR, download=True, train=False, transform=transform) elif (dataset_name == 'mnist-fashion'): dataset = datasets.mnist.FashionMNIST(root=config.DATASETS_DIR, download=True, transform=transform) if load_test: test_dataset = datasets.mnist.FashionMNIST(root=config.DATASETS_DIR, download=True, train=False, transform=transform) elif (dataset_name == 'cifar-10'): dataset = datasets.cifar.CIFAR10(root=config.DATASETS_DIR, download=True, transform=transform) if load_test: test_dataset = datasets.cifar.CIFAR10(root=config.DATASETS_DIR, download=True, train=False, transform=transform) else: raise AttributeError('Dataset not found') if ((subset is not None) and (subset > 0)): dataset = Subset(dataset, random.sample(range(len(dataset)), subset)) if load_test: train_loader = DataLoader(dataset, batch_size=params['train_batch_size'], shuffle=True) val_loader = DataLoader(test_dataset, batch_size=params['val_batch_size'], shuffle=False) else: (train_dataset, val_dataset) = utils.split_dataset(dataset, val_split=params['val_split']) train_loader = DataLoader(train_dataset, batch_size=params['train_batch_size'], shuffle=True) val_loader = DataLoader(val_dataset, batch_size=params['val_batch_size'], shuffle=False) data_batch = next(iter(train_loader))[0] logging.debug('Data batch min: {:.4f}, max: {:.4f}.'.format(torch.min(data_batch), torch.max(data_batch))) logging.debug('Data batch mean: {1:.4f}, std: {0:.4f}.'.format(*torch.std_mean(data_batch))) return (train_loader, val_loader)
def attach_handlers(run, model, optimizer, learning_rule, trainer, evaluator, train_loader, val_loader, params): UnitConvergence(model[0], learning_rule.norm).attach(trainer.engine, 'unit_conv') pbar = ProgressBar(persist=True, bar_format=config.IGNITE_BAR_FORMAT) pbar.attach(trainer.engine, metric_names='all') tqdm_logger = TqdmLogger(pbar=pbar) tqdm_logger.attach_output_handler(evaluator.engine, event_name=Events.COMPLETED, tag='validation', global_step_transform=global_step_from_engine(trainer.engine)) evaluator.attach(trainer.engine, Events.EPOCH_COMPLETED(every=100), train_loader, val_loader) lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer, lr_lambda=(lambda epoch: (1 - (epoch / params['epochs'])))) lr_scheduler = LRScheduler(lr_scheduler) trainer.engine.add_event_handler(Events.EPOCH_COMPLETED, lr_scheduler) mc_handler = ModelCheckpoint(config.MODELS_DIR, run.replace('/', '-'), n_saved=1, create_dir=True, require_empty=False, global_step_transform=global_step_from_engine(trainer.engine)) trainer.engine.add_event_handler(Events.EPOCH_COMPLETED, mc_handler, {'m': model}) tb_logger = TensorboardLogger(log_dir=os.path.join(config.TENSORBOARD_DIR, run)) (images, labels) = next(iter(train_loader)) tb_logger.writer.add_graph(copy.deepcopy(model).cpu(), images) tb_logger.writer.add_hparams(params, {}) tb_logger.attach_output_handler(evaluator.engine, event_name=Events.COMPLETED, tag='validation', metric_names='all', global_step_transform=global_step_from_engine(trainer.engine)) tb_logger.attach_output_handler(trainer.engine, event_name=Events.EPOCH_COMPLETED, tag='train', metric_names=['unit_conv']) input_shape = tuple(next(iter(train_loader))[0].shape[1:]) tb_logger.attach(trainer.engine, log_handler=WeightsImageHandler(model, input_shape), event_name=Events.EPOCH_COMPLETED) tb_logger.attach(trainer.engine, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.EPOCH_STARTED) return tb_logger
def main(args: Namespace, params: dict, dataset_name, run_postfix=''): identifier = time.strftime('%Y%m%d-%H%M%S') run = '{}/heb/{}'.format(dataset_name, identifier) if run_postfix: run += ('-' + run_postfix) print("Starting run '{}'".format(run)) model = models.create_conv1_model(28, 1, num_kernels=400, n=1, batch_norm=True) if (args.initial_weights is not None): model = utils.load_weights(model, os.path.join(PATH, args.initial_weights)) freeze_layers = ['linear1'] else: freeze_layers = None device = utils.get_device(args.device) model.to(device) print("Device set to '{}'.".format(device)) (train_loader, val_loader) = data.get_data(params, dataset_name, subset=10000) learning_rule = KrotovsRule(delta=params['delta'], k=params['k'], norm=params['norm'], normalize=False) optimizer = Local(named_params=model.named_parameters(), lr=params['lr']) def init_function(h_model): h_criterion = torch.nn.CrossEntropyLoss() h_evaluator = SupervisedEvaluator(model=h_model, criterion=h_criterion, device=device) h_train_evaluator = SupervisedEvaluator(model=h_model, criterion=h_criterion, device=device) h_optimizer = torch.optim.Adam(params=h_model.parameters(), lr=0.001) h_lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(h_optimizer, 'max', verbose=True, patience=5, factor=0.5) h_trainer = SupervisedTrainer(model=h_model, optimizer=h_optimizer, criterion=h_criterion, device=device) h_pbar = ProgressBar(persist=False, bar_format=config.IGNITE_BAR_FORMAT) h_pbar.attach(h_trainer.engine, metric_names='all') h_tqdm_logger = TqdmLogger(pbar=h_pbar) h_tqdm_logger.attach_output_handler(h_evaluator.engine, event_name=Events.COMPLETED, tag='validation', global_step_transform=global_step_from_engine(h_trainer.engine)) h_tqdm_logger.attach_output_handler(h_train_evaluator.engine, event_name=Events.COMPLETED, tag='train', global_step_transform=global_step_from_engine(h_trainer.engine)) h_evaluator.engine.add_event_handler(Events.COMPLETED, (lambda engine: h_lr_scheduler.step(engine.state.metrics['accuracy']))) h_handler = ModelCheckpoint(config.MODELS_DIR, run.replace('/', '-'), n_saved=1, create_dir=True, require_empty=False, score_name='acc', score_function=(lambda engine: engine.state.metrics['accuracy']), global_step_transform=global_step_from_engine(trainer.engine)) h_evaluator.engine.add_event_handler(Events.EPOCH_COMPLETED, h_handler, {'m': model}) h_es_handler = EarlyStopping(patience=15, min_delta=0.0001, score_function=(lambda engine: engine.state.metrics['accuracy']), trainer=h_trainer.engine, cumulative_delta=True) h_es_handler.logger.setLevel(logging.DEBUG) h_evaluator.engine.add_event_handler(Events.COMPLETED, h_es_handler) return (h_trainer, h_train_evaluator, h_evaluator) evaluator = HebbianEvaluator(model=model, score_name='accuracy', score_function=(lambda engine: engine.state.metrics['accuracy']), epochs=500, init_function=init_function, supervised_from=(- 1)) trainer = HebbianTrainer(model=model, learning_rule=learning_rule, optimizer=optimizer, supervised_from=(- 1), freeze_layers=freeze_layers, device=device) tb_logger = attach_handlers(run, model, optimizer, learning_rule, trainer, evaluator, train_loader, val_loader, params) trainer.run(train_loader=train_loader, epochs=params['epochs']) tb_logger.close()
def main(): model = models.create_fc1_model([(28 ** 2), 2000]) transform = transforms.Compose([transforms.ToTensor()]) dataset = datasets.mnist.MNIST(root=config.DATASETS_DIR, download=True, transform=transform) train_loader = DataLoader(dataset, batch_size=1024, shuffle=True) learning_rule = KrotovsRule() optimizer = Local(named_params=model.named_parameters(), lr=0.01) trainer = HebbianTrainer(model=model, learning_rule=learning_rule, optimizer=optimizer) trainer.run(train_loader=train_loader, epochs=10)
def create_fc1_model(hu: List, n: float=1.0, batch_norm=False): modules = [('flatten', Flatten()), ('linear1', nn.Linear(hu[0], hu[1], bias=False))] if batch_norm: modules.append(('batch_norm', nn.BatchNorm1d(num_features=hu[1]))) modules.append(('repu', RePU(n))) linear2 = nn.Linear(hu[1], 10) modules.append(('linear2', linear2)) return nn.Sequential(OrderedDict(modules))
def create_fc2_model(hu: List, n: float=1.0, batch_norm=False): modules = [('flatten', Flatten()), ('linear1', nn.Linear(hu[0], hu[1], bias=False))] if batch_norm: modules.append(('batch_norm', nn.BatchNorm1d(num_features=hu[1]))) modules.append(('repu1', RePU(n))) modules.append(('linear2', nn.Linear(hu[1], hu[2], bias=False))) modules.append(('repu2', RePU(n))) linear3 = nn.Linear(hu[2], 10) modules.append(('linear3', linear3)) return nn.Sequential(OrderedDict(modules))
def create_conv1_model(input_dim, input_channels=1, num_kernels=8, kernel_size=5, pool_size=2, n=1, batch_norm=False, dropout=None): modules = [('conv1', nn.Conv2d(input_channels, num_kernels, kernel_size, bias=False))] if batch_norm: modules.append(('batch_norm', nn.BatchNorm2d(num_features=num_kernels))) modules.extend([('repu', RePU(n)), ('pool1', nn.MaxPool2d(pool_size))]) if (dropout is not None): modules.append(('dropout1', nn.Dropout2d(dropout))) modules.extend([('flatten', Flatten()), ('linear1', nn.Linear((num_kernels * (int(((input_dim - (kernel_size - 1)) / 2)) ** 2)), 10))]) return nn.Sequential(OrderedDict(modules))
def create_conv2_model(input_dim, input_channels=1, num_kernels=None, kernel_size=4, pool_size=2, n=1): if (num_kernels is None): num_kernels = [8, 16] modules = [('conv1', nn.Conv2d(input_channels, num_kernels[0], kernel_size, bias=False)), ('repu1', RePU(n)), ('pool1', nn.MaxPool2d(pool_size)), ('conv2', nn.Conv2d(num_kernels[0], num_kernels[1], kernel_size, bias=False)), ('repu2', RePU(n)), ('pool2', nn.MaxPool2d(pool_size)), ('flatten', Flatten()), ('linear1', nn.Linear((num_kernels[1] * (int(((((input_dim - (kernel_size - 1)) / 2) - (kernel_size - 1)) / 2)) ** 2)), 10))] return nn.Sequential(OrderedDict(modules))
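The linear-layer fan-in in the two conv constructors above is plain valid-convolution and pooling arithmetic: a kernel of size k shrinks a side from d to d - (k - 1), and each 2x2 max-pool halves it (floored). Worked out for the create_conv1_model(28, 1, num_kernels=400) configuration used elsewhere in this file:

side = 28 - (5 - 1)       # 24 after the 5x5 valid convolution
side = int(side / 2)      # 12 after 2x2 max pooling
fan_in = 400 * side ** 2  # 400 * 144 = 57600 inputs to linear1
print(fan_in)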
def attach_handlers(run, model, optimizer, trainer, train_evaluator, evaluator, train_loader, val_loader, params): pbar = ProgressBar(persist=True, bar_format=config.IGNITE_BAR_FORMAT) pbar.attach(trainer.engine, metric_names='all') tqdm_logger = TqdmLogger(pbar=pbar) tqdm_logger.attach_output_handler(evaluator.engine, event_name=Events.COMPLETED, tag='validation', global_step_transform=global_step_from_engine(trainer.engine)) tqdm_logger.attach_output_handler(train_evaluator.engine, event_name=Events.COMPLETED, tag='train', global_step_transform=global_step_from_engine(trainer.engine)) train_evaluator.attach(trainer.engine, Events.EPOCH_COMPLETED, train_loader) evaluator.attach(trainer.engine, Events.EPOCH_COMPLETED, data=val_loader) lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', verbose=True, patience=5, factor=0.5) evaluator.engine.add_event_handler(Events.COMPLETED, (lambda engine: lr_scheduler.step(engine.state.metrics['accuracy']))) es_handler = EarlyStopping(patience=15, score_function=(lambda engine: engine.state.metrics['accuracy']), trainer=trainer.engine, cumulative_delta=True, min_delta=0.0001) if (('train_all' in params) and params['train_all']): train_evaluator.engine.add_event_handler(Events.COMPLETED, es_handler) else: evaluator.engine.add_event_handler(Events.COMPLETED, es_handler) es_handler.logger.setLevel(logging.DEBUG) name = run.replace('/', '-') mc_handler = ModelCheckpoint(config.MODELS_DIR, name, n_saved=1, create_dir=True, require_empty=False, score_name='acc', score_function=(lambda engine: engine.state.metrics['accuracy']), global_step_transform=global_step_from_engine(trainer.engine)) evaluator.engine.add_event_handler(Events.EPOCH_COMPLETED, mc_handler, {'m': model}) tb_logger = TensorboardLogger(log_dir=os.path.join(config.TENSORBOARD_DIR, run)) (images, labels) = next(iter(train_loader)) tb_logger.writer.add_graph(copy.deepcopy(model).cpu(), images) tb_logger.writer.add_hparams(params, {'hparam/dummy': 0}) tb_logger.attach_output_handler(train_evaluator.engine, event_name=Events.COMPLETED, tag='train', metric_names='all', global_step_transform=global_step_from_engine(trainer.engine)) tb_logger.attach_output_handler(evaluator.engine, event_name=Events.COMPLETED, tag='validation', metric_names='all', global_step_transform=global_step_from_engine(trainer.engine)) input_shape = tuple(next(iter(train_loader))[0].shape[1:]) tb_logger.attach(trainer.engine, log_handler=WeightsImageHandler(model, input_shape), event_name=Events.EPOCH_COMPLETED) tb_logger.attach(trainer.engine, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.EPOCH_STARTED) return (es_handler, tb_logger)
def main(params, dataset_name, transfer_learning=False): identifier = time.strftime('%Y%m%d-%H%M%S') run = '{}/sup/{}'.format(dataset_name, identifier) if transfer_learning: run += '-tl' if (('train_all' in params) and params['train_all']): run += '-test' print("Starting run '{}'".format(run)) model = models.create_conv1_model(28, 1, num_kernels=400, n=1, batch_norm=True) if transfer_learning: weights_path = '../output/models/heb-mnist-fashion-20200607-015911_m_100_acc=0.855.pth' model = utils.load_weights(model, os.path.join(PATH, weights_path), layer_names=['conv1'], freeze=True) device = utils.get_device() model.to(device) print("Device set to '{}'.".format(device)) (train_loader, val_loader) = data.get_data(params, dataset_name, subset=params['train_subset']) criterion = torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(params=model.parameters(), lr=params['lr']) train_evaluator = SupervisedEvaluator(model=model, criterion=criterion) evaluator = SupervisedEvaluator(model=model, criterion=criterion) trainer = SupervisedTrainer(model=model, optimizer=optimizer, criterion=criterion, device=device) (es_handler, tb_logger) = attach_handlers(run, model, optimizer, trainer, train_evaluator, evaluator, train_loader, val_loader, params) trainer.run(train_loader=train_loader, epochs=params['epochs']) tb_logger.writer.add_hparams(params, {'hparam/accuracy': es_handler.best_score}) tb_logger.close()
def main(params): model = models.create_fc1_model([(28 ** 2), 2000], n=1, batch_norm=True) weights_path = '../output/models/heb-mnist-fashion-20200426-101420_m_500_acc=0.852.pth' state_dict_path = os.path.join(PATH, weights_path) model = load_weights(model, state_dict_path) run = os.path.splitext(os.path.basename(weights_path))[0].split('_')[0] run += '/test' logging.info("Starting run '{}'.".format(run)) transform = transforms.Compose([transforms.ToTensor()]) dataset = datasets.mnist.FashionMNIST(root=config.DATASETS_DIR, download=True, transform=transform, train=False) test_loader = DataLoader(dataset, batch_size=params['val_batch_size'], shuffle=False) criterion = torch.nn.CrossEntropyLoss() evaluator = SupervisedEvaluator(model=model, criterion=criterion) evaluator.run(test_loader) print(evaluator.metrics)
class SimpleEngine(Engine): 'Custom engine with custom run function.\n\n This engine has only metrics in its state and only fires 2 events.\n ' def __init__(self, run_function: Callable): super().__init__(process_function=(lambda x, y: None)) self._allowed_events = [Events.STARTED, Events.COMPLETED] self._run_function = run_function def run(self, *args, **kwargs): self._fire_event(Events.STARTED) self._run_function(*args, **kwargs) self._fire_event(Events.COMPLETED)
class Evaluator(ABC): def __init__(self): self.engine = None self.logger = logging.getLogger(((__name__ + '.') + self.__class__.__name__)) def attach(self, engine, event_name, *args, **kwargs): if (event_name not in State.event_to_attr): raise RuntimeError("Unknown event name '{}'".format(event_name)) return engine.add_event_handler(event_name, self, *args, **kwargs) def run(self, *args, **kwargs): self.engine.run(*args, **kwargs) def __call__(self, engine, *args, **kwargs): self.run(*args, **kwargs)
class HebbianEvaluator(Evaluator): def __init__(self, model: torch.nn.Module, score_name: str, score_function: Callable, init_function: Callable[([torch.nn.Module], tuple)]=None, epochs: int=100, supervised_from: int=None): super().__init__() self.model = model self.score_name = score_name self.score_function = score_function if (init_function is None): self.init_function = self._init_function else: self.init_function = init_function self.epochs = epochs self.supervised_from = supervised_from self.engine = self.create_hebbian_evaluator(self._run) self._init_metrics() @staticmethod def create_hebbian_evaluator(run_function) -> Engine: return SimpleEngine(run_function=run_function) @staticmethod def _init_function(model): 'Default initialization function.' criterion = torch.nn.CrossEntropyLoss() evaluator = SupervisedEvaluator(model=model, criterion=criterion) train_evaluator = SupervisedEvaluator(model=model, criterion=criterion) optimizer = torch.optim.Adam(params=model.parameters()) trainer = SupervisedTrainer(model=model, optimizer=optimizer, criterion=criterion) es_handler = EarlyStopping(patience=5, min_delta=0.0001, score_function=(lambda engine: (- engine.state.metrics['loss'])), trainer=trainer.engine, cumulative_delta=True) evaluator.engine.add_event_handler(Events.COMPLETED, es_handler) return (trainer, train_evaluator, evaluator) def _init_metrics(self): self.best_score = None def _init(self, train_loader, val_loader): (self._trainer, self._train_evaluator, self._evaluator) = self.init_function(self.model) self._train_evaluator.attach(self._trainer.engine, Events.EPOCH_COMPLETED, train_loader) self._evaluator.attach(self._trainer.engine, Events.EPOCH_COMPLETED, val_loader) @self._evaluator.engine.on(Events.COMPLETED) def save_best_metrics(eval_engine): current_score = self.score_function(eval_engine) if ((self.best_score is None) or (current_score > self.best_score)): self.best_score = current_score self.engine.state.metrics = eval_engine.state.metrics self.logger.info('New best validation {} = {:.4f}.'.format(self.score_name, self.best_score)) self._init_metrics() def _run(self, train_loader, val_loader): self.logger.info("Supervised training from layer '{}'.".format(list(self.model.named_children())[self.supervised_from][0])) self._init(train_loader, val_loader) layers = list(self.model.children()) for layer in layers[:self.supervised_from]: for param in layer.parameters(): param.requires_grad = False for lyr in layers[self.supervised_from:]: try: lyr.reset_parameters() except AttributeError: pass self._trainer.run(train_loader=train_loader, epochs=self.epochs)
class SupervisedEvaluator(Evaluator): def __init__(self, model, criterion, metrics=None, device=None): super().__init__() self.device = utils.get_device(device) if (metrics is None): metrics = {'accuracy': Accuracy(), 'loss': Loss(criterion)} self.engine = create_supervised_evaluator(model, metrics=metrics, device=self.device)
class OutputHandler(BaseOutputHandler): 'Helper handler to log engine\'s output and/or metrics.\n\n Args:\n tag (str): common title for all produced plots. For example, \'training\'\n metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available\n metrics.\n output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{\'loss\': loss1, \'another_loss\': loss2}` to label the plot\n with corresponding keys.\n global_step_transform (callable, optional): global step transform function to output a desired global step.\n ' def __init__(self, tag, metric_names='all', output_transform=None, global_step_transform=None): super(OutputHandler, self).__init__(tag, metric_names, output_transform, global_step_transform) def __call__(self, engine, logger, event_name): if (not isinstance(logger, TqdmLogger)): raise RuntimeError("Handler 'OutputHandler' works only with TqdmLogger") metrics = self._setup_output_metrics(engine) global_step = self.global_step_transform(engine, event_name) if (not isinstance(global_step, int)): raise TypeError('global_step must be int, got {}. Please check the output of global_step_transform.'.format(type(global_step))) message = '{} epoch {}: '.format(self.tag.capitalize(), global_step) metrics_str = [] for (key, value) in metrics.items(): if (isinstance(value, numbers.Number) or (isinstance(value, torch.Tensor) and (value.ndimension() == 0))): if (value > 10000.0): metrics_str.append('{}={:.4e}'.format(key, value)) else: metrics_str.append('{}={:.4f}'.format(key, value)) elif (isinstance(value, torch.Tensor) and (value.ndimension() == 1)): for (i, v) in enumerate(value): metrics_str.append('{}{}={}'.format(key, i, v.item())) else: warnings.warn('TqdmLogger output_handler can not log metrics value type {}'.format(type(value))) logger.pbar.log_message((message + ', '.join(metrics_str)))
class TqdmLogger(BaseLogger): 'Tqdm logger to log messages using the progress bar.' def __init__(self, pbar): self.pbar = pbar def close(self): if self.pbar: self.pbar.close() self.pbar = None def _create_output_handler(self, *args, **kwargs): return OutputHandler(*args, **kwargs) def _create_opt_params_handler(self, *args, **kwargs): 'Intentionally empty' pass
class HebbsRule(LearningRule): def __init__(self, c=0.1): super().__init__() self.c = c def update(self, inputs, w): d_ws = torch.zeros(inputs.size(0), *w.shape) for (idx, x) in enumerate(inputs): y = torch.matmul(w, x) d_w = torch.zeros(w.shape) for i in range(y.shape[0]): for j in range(x.shape[0]): d_w[(i, j)] = ((self.c * x[j]) * y[i]) d_ws[idx] = d_w return torch.mean(d_ws, dim=0)
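Note the buffer now holds one update per sample (mirroring OjasRule below) and the matrix-vector product uses torch.matmul, since torch.dot only accepts 1-D tensors. The nested loops compute the plain Hebbian outer product d_w[i, j] = c * y_i * x_j averaged over the batch; an equivalent vectorized form (a sketch, not part of the repository API) is:

import torch

def hebb_update_vectorized(inputs: torch.Tensor, w: torch.Tensor, c: float = 0.1) -> torch.Tensor:
    # y_b = W x_b for every sample, then average the outer products c * y x^T.
    y = inputs @ w.t()  # (batch, out_features)
    return c * torch.einsum('bi,bj->ij', y, inputs) / inputs.size(0)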
class KrotovsRule(LearningRule): 'Krotov-Hopfield Hebbian learning rule fast implementation.\n\n Original source: https://github.com/DimaKrotov/Biological_Learning\n\n Args:\n precision: Numerical precision of the weight updates.\n delta: Anti-hebbian learning strength.\n norm: Lebesgue norm of the weights.\n k: Ranking parameter; the k-th most activated unit receives the anti-Hebbian update.\n ' def __init__(self, precision=1e-30, delta=0.4, norm=2, k=2, normalize=False): super().__init__() self.precision = precision self.delta = delta self.norm = norm self.k = k self.normalize = normalize def init_layers(self, layers: list): for layer in [lyr.layer for lyr in layers]: if ((type(layer) == torch.nn.Linear) or (type(layer) == torch.nn.Conv2d)): layer.weight.data.normal_(mean=0.0, std=1.0) def update(self, inputs: torch.Tensor, weights: torch.Tensor): batch_size = inputs.shape[0] num_hidden_units = weights.shape[0] input_size = inputs[0].shape[0] assert (self.k <= num_hidden_units), 'The number of hidden units must be greater than or equal to k!' if self.normalize: norm = torch.norm(inputs, dim=1) norm[(norm == 0)] = 1 inputs = torch.div(inputs, norm.view((- 1), 1)) inputs = torch.t(inputs) tot_input = torch.matmul((torch.sign(weights) * (torch.abs(weights) ** (self.norm - 1))), inputs) (_, indices) = torch.topk(tot_input, k=self.k, dim=0) activations = torch.zeros((num_hidden_units, batch_size)) activations[(indices[0], torch.arange(batch_size))] = 1.0 activations[(indices[(self.k - 1)], torch.arange(batch_size))] = (- self.delta) xx = torch.sum(torch.mul(activations, tot_input), 1) norm_factor = torch.mul(xx.view(xx.shape[0], 1).repeat((1, input_size)), weights) ds = (torch.matmul(activations, torch.t(inputs)) - norm_factor) nc = torch.max(torch.abs(ds)) if (nc < self.precision): nc = self.precision d_w = torch.true_divide(ds, nc) return d_w
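A shape-level smoke test of the rule on random data (not a training recipe): the total input I = sign(w)|w|^(p-1) . x is ranked per hidden unit, the top unit is pulled toward the input, the k-th unit is pushed away with strength delta, and the step is normalized by its largest magnitude:

import torch

torch.manual_seed(0)
rule = KrotovsRule(delta=0.4, norm=2, k=2)
inputs = torch.rand(64, 784)      # batch of flattened 28x28 images
weights = torch.randn(2000, 784)  # 2000 hidden units
d_w = rule.update(inputs, weights)
print(d_w.shape)               # torch.Size([2000, 784])
print(d_w.abs().max().item())  # 1.0 after max-normalization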
class LearningRule(ABC): def __init__(self): self.logger = logging.getLogger(((__name__ + '.') + self.__class__.__name__)) def init_layers(self, model): pass @abstractmethod def update(self, x, w): pass
class OjasRule(LearningRule): def __init__(self, c=0.1): super().__init__() self.c = c def update(self, inputs, w): d_ws = torch.zeros(inputs.size(0), *w.shape) for (idx, x) in enumerate(inputs): y = torch.mm(w, x.unsqueeze(1)) d_w = torch.zeros(w.shape) for i in range(y.shape[0]): for j in range(x.shape[0]): d_w[(i, j)] = ((self.c * y[i]) * (x[j] - (y[i] * w[(i, j)]))) d_ws[idx] = d_w return torch.mean(d_ws, dim=0)
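Oja's rule augments Hebb with the decay term -y_i^2 * w_ij, which bounds the weights and drives a single unit toward the dominant principal direction with unit norm. A tiny sanity check (the loop implementation is slow, so shapes are kept small; convergence behavior is indicative, not guaranteed):

import torch

torch.manual_seed(0)
rule = OjasRule(c=0.05)
w = torch.randn(1, 2)
inputs = torch.randn(256, 2) * torch.tensor([3.0, 0.5])  # variance dominated by the first axis
for _ in range(100):
    w = w + rule.update(inputs[:32], w)
print(w, w.norm())  # norm should approach 1, direction the first axis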
class UnitConvergence(Metric): 'Metric reporting the fraction of units in the given layer whose summed |w|^norm is below 1 + tolerance, i.e. whose weights have converged to (at most) unit Lebesgue norm.\n ' def __init__(self, layer: torch.nn.Module, norm: int, tolerance: float=0.1, output_transform=(lambda x: x), device=None): self.layer = layer self.norm = norm self.tolerance = tolerance super(UnitConvergence, self).__init__(output_transform=output_transform, device=device) def reset(self): super(UnitConvergence, self).reset() def update(self, output): pass def compute(self): if (type(self.layer) == torch.nn.Linear): weights = self.layer.weight.detach() elif (type(self.layer) == torch.nn.Conv2d): weights = self.layer.weight.detach() weights = weights.view((- 1), (self.layer.kernel_size[0] * self.layer.kernel_size[1])) else: raise TypeError("Layer type '{}' not supported!".format(type(self.layer))) sums = torch.sum(torch.pow(torch.abs(weights), self.norm), 1) num_converged = torch.sum((sums < (1 + self.tolerance))) num = sums.shape[0] return (float(num_converged) / num)
class Flatten(nn.Module): def forward(self, x: torch.Tensor): return x.view(x.size(0), (- 1))
class RePU(nn.ReLU): def __init__(self, n): super(RePU, self).__init__() self.n = n def forward(self, x: torch.Tensor): return (torch.relu(x) ** self.n)
class SPELoss(Module): def __init__(self, m=1, beta=0.1): super(SPELoss, self).__init__() self.m = m self.beta = beta def forward(self, output, target): output = torch.tanh((self.beta * output)) target = ((torch.nn.functional.one_hot(target, num_classes=output.shape[1]) * 2) - 1) loss = torch.sum((torch.abs((output - target)) ** self.m)) return loss
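SPELoss squashes logits through tanh(beta * x) and measures the m-power absolute error against one-hot targets coded as +/-1, so with m=1 it is an L1 loss in tanh space. A small worked example (values chosen arbitrarily):

import torch

loss_fn = SPELoss(m=1, beta=0.1)
output = torch.tensor([[5.0, -5.0, 0.0], [0.0, 5.0, -5.0]])  # 2 samples, 3 classes
target = torch.tensor([0, 1])
# tanh(0.5) ~ 0.462, so even confident logits keep a residual of |0.462 - 1|.
print(loss_fn(output, target))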
class Local(Optimizer): def __init__(self, named_params, lr=required): (self.param_names, params) = zip(*named_params) if ((lr is not required) and (lr < 0.0)): raise ValueError('Invalid learning rate: {}'.format(lr)) defaults = dict(lr=lr) super(Local, self).__init__(params, defaults) def local_step(self, d_p, layer_name, closure=None): 'Performs a single local optimization step.' loss = None if (closure is not None): loss = closure() for group in self.param_groups: layer_index = self.param_names.index((layer_name + '.weight')) p = group['params'][layer_index] p.data.add_((group['lr'] * d_p)) try: self._step_count += 1 except AttributeError: pass return loss
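Unlike a gradient-based optimizer, Local has no step(); the trainer hands it a precomputed update d_p, and local_step applies p += lr * d_p to the weight whose name matches layer_name. A minimal sketch:

import torch
from collections import OrderedDict
from torch import nn

model = nn.Sequential(OrderedDict([('linear1', nn.Linear(4, 3, bias=False))]))
optimizer = Local(named_params=model.named_parameters(), lr=0.01)
d_p = torch.ones_like(model.linear1.weight)      # stand-in for rule.update(...)
optimizer.local_step(d_p, layer_name='linear1')  # weight += 0.01 * d_p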
class Trainer(ABC): 'Abstract base trainer class.\n\n Supports (optional) evaluating and visualizing by default.\n ' def __init__(self, engine, model: torch.nn.Module, device: Optional[Union[(str, torch.device)]]=None): self.engine = engine self.model = model self.device = utils.get_device(device) self.logger = logging.getLogger(((__name__ + '.') + self.__class__.__name__)) def run(self, train_loader: DataLoader, epochs: int=10): self.engine.run(train_loader, max_epochs=epochs)
class SupervisedTrainer(Trainer): 'Trains a model using classical supervised backpropagation.\n\n Args:\n model: The model to be trained.\n optimizer: The optimizer used to train the model.\n criterion: The criterion used for calculating the loss.\n device: The device to be used.\n ' def __init__(self, model: torch.nn.Module, optimizer: Optimizer, criterion, device: Optional[Union[(str, torch.device)]]=None): device = utils.get_device(device) engine = create_supervised_trainer(model, optimizer, criterion, device=device) RunningAverage(output_transform=(lambda x: x)).attach(engine, 'loss') super().__init__(engine=engine, model=model, device=device)
class HebbianTrainer(Trainer): 'Trains a model using unsupervised local learning rules also known as Hebbian learning.\n\n The specified learning rule is used to perform local weight updates after each batch of data. Per batch all\n trainable layers are updated in sequence.\n\n Args:\n model (torch.nn.Sequential): The model to be trained.\n learning_rule (LearningRule | Dict[str, LearningRule]):\n The learning rule(s) used to update the model weights.\n optimizer (Optimizer): The optimizer used to perform the weight updates.\n supervised_from (int): From which layer (name) the training should be performed supervised.\n freeze_layers (list): Layers (names) to freeze during training.\n device (Optional[Union[str, torch.device]]): The device to perform the training on.\n\n Attributes:\n supervised_from: See the supervised_from arg.\n freeze_layers: See the freeze_layers arg.\n layers: The Hebbian trainable layers.\n ' def __init__(self, model: torch.nn.Sequential, learning_rule: Union[(LearningRule, Dict[(str, LearningRule)])], optimizer: Optimizer, supervised_from: int=(- 1), freeze_layers: List[str]=None, complete_forward: bool=False, single_forward: bool=False, device: Optional[Union[(str, torch.device)]]=None): device = utils.get_device(device) engine = self.create_hebbian_trainer(model, learning_rule, optimizer, device=device) self.supervised_from = supervised_from self.freeze_layers = freeze_layers self.complete_forward = complete_forward self.single_forward = single_forward if (self.freeze_layers is None): self.freeze_layers = [] Layer = namedtuple('Layer', ['idx', 'name', 'layer']) self.layers = [] for (idx, (name, layer)) in enumerate(list(model.named_children())[:self.supervised_from]): if (((type(layer) == torch.nn.Linear) or (type(layer) == torch.nn.Conv2d)) and (name not in self.freeze_layers)): self.layers.append(Layer(idx, name, layer)) self.learning_rule = learning_rule if (type(self.learning_rule) == dict): for rule in self.learning_rule.values(): rule.init_layers(self.layers) else: self.learning_rule.init_layers(self.layers) super().__init__(engine=engine, model=model, device=device) self.logger.info('Received {} trainable layer(s): {}.'.format(len(self.layers), [lyr.name for lyr in self.layers])) if self.single_forward: self._hooks = {} self._inputs = {} self._outputs = {} for lyr in self.layers: self._hooks[lyr.name] = lyr.layer.register_forward_hook(partial(self._store_data_hook, layer_name=lyr.name)) def _store_data_hook(self, _, inp, output, layer_name): self._inputs[layer_name] = inp[0] self._outputs[layer_name] = output def _prepare_data(self, inputs, model, layer_index): 'Prepare the inputs and layer weights to be passed to the learning rule.\n\n Args:\n inputs: The input to the model.\n model: The model to be trained.\n layer_index: The index of the layer currently being trained.\n ' layers = list(model.children()) layer = layers[layer_index] if (layer_index == 0): x = inputs else: x = inputs for lyr in layers[:layer_index]: x = lyr(x) if self.complete_forward: for lyr in layers[layer_index:]: x = lyr(x) if (type(layer) == torch.nn.Linear): w = layer.weight elif (type(layer) == torch.nn.Conv2d): w = layer.weight w = w.view((- 1), (layer.kernel_size[0] * layer.kernel_size[1])) x = utils.extract_image_patches(x, kernel_size=layer.kernel_size, stride=layer.stride, padding=layer.padding, dilation=layer.dilation) else: raise TypeError('Unsupported layer type!') x = x.view((x.shape[0], (- 1))) self.logger.debug('Prepared inputs and weights with shapes {} and {}.'.format(list(x.shape), list(w.shape))) return (x, w) def _prepare_data2(self, layer, layer_name): x = self._inputs[layer_name] y = self._outputs[layer_name] if (type(layer) == torch.nn.Linear): w = layer.weight elif (type(layer) == torch.nn.Conv2d): w = layer.weight w = w.view((- 1), (layer.kernel_size[0] * layer.kernel_size[1])) x = utils.extract_image_patches(x, kernel_size=layer.kernel_size, stride=layer.stride, padding=layer.padding, dilation=layer.dilation) else: raise TypeError('Unsupported layer type!') x = x.view((x.shape[0], (- 1))) self.logger.debug('Prepared inputs and weights with shapes {} and {}.'.format(list(x.shape), list(w.shape))) return (x, y, w) def _forward(self, inputs, model): if self.complete_forward: model(inputs) else: layers = list(model.children()) x = inputs for lyr in layers[:(self.supervised_from - 1)]: x = lyr(x) def create_hebbian_trainer(self, model: torch.nn.Module, learning_rule, optimizer, device=None, non_blocking=False, prepare_batch=utils.prepare_batch, output_transform=(lambda x, y: 0)): def _update(_, batch: Sequence[torch.Tensor]): model.train() with torch.no_grad(): (x, y) = prepare_batch(batch, device=device, non_blocking=non_blocking) if self.single_forward: self._forward(x, model) for (layer_index, layer_name, layer) in self.layers: self.logger.debug("Updating layer '{}' with shape {}.".format(layer, layer.weight.shape)) if self.single_forward: (inputs, _, weights) = self._prepare_data2(layer, layer_name) else: (inputs, weights) = self._prepare_data(x, model, layer_index) if (type(learning_rule) == dict): try: rule = learning_rule[layer_name] except KeyError: self.logger.error("No learning rule was specified for layer '{}'!".format(layer_name)) raise else: rule = learning_rule d_p = rule.update(inputs, weights) d_p = d_p.view(*layer.weight.size()) optimizer.local_step(d_p, layer_name=layer_name) return output_transform(x, y) return Engine(_update)
def plot_to_img(fig): 'Convert a matplotlib figure handle, via its canvas buffer, into a (3, H, W) numpy array that can be visualized in TensorBoard using the add_image function.\n ' fig.canvas.draw() img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) img = img.reshape((fig.canvas.get_width_height()[::(- 1)] + (3,))) img = (img / 255.0) img = np.transpose(img, (2, 0, 1)) plt.close(fig) return img
def extract_image_patches(x, kernel_size, stride=(1, 1), dilation=1, padding=0): 'Extract sliding kernel-sized patches from a (b, c, h, w) tensor as a (b*out_h*out_w*c, k0, k1) tensor of per-channel spatial patches. Note: the dilation and padding arguments are currently unused.\n ' (b, c, h, w) = x.shape patches = x.unfold(2, kernel_size[0], stride[0]).unfold(3, kernel_size[1], stride[1]) patches = patches.permute(0, 2, 3, 1, 4, 5).contiguous() return patches.view((- 1), kernel_size[0], kernel_size[1])
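The permutation above reorders the unfolded dims to (b, out_h, out_w, c, k0, k1) so that each row of the final view is one per-channel spatial patch; the original permute(0, 4, 5, 1, 2, 3) left the kernel dims before the spatial dims and scrambled the patches. A quick shape check (assuming the fixed permutation): a 28x28 image with a 5x5 kernel and stride 1 yields 24x24 positions per channel:

import torch

x = torch.randn(2, 3, 28, 28)  # (b, c, h, w)
patches = extract_image_patches(x, kernel_size=(5, 5), stride=(1, 1))
print(patches.shape)  # torch.Size([3456, 5, 5]) == (2*24*24*3, 5, 5)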
def split_dataset(dataset, val_split): val_size = int((val_split * len(dataset))) train_size = (len(dataset) - val_size) (train_dataset, val_dataset) = random_split(dataset, [train_size, val_size]) return (train_dataset, val_dataset)
def prepare_batch(batch, device=None, non_blocking=False): 'Prepare batch for training: pass to a device with options.' (x, y) = batch return (convert_tensor(x, device=device, non_blocking=non_blocking), convert_tensor(y, device=device, non_blocking=non_blocking))
def load_weights(model: torch.nn.Module, state_dict_path, layer_names: List=None, freeze=False): 'Load model weights from a stored state dict. Optionally only load weights for the specified layers.\n\n Args:\n model: The model acquiring the weights.\n state_dict_path: The path of the source state dict\n layer_names: The names of the layers to load. Each name can also be a tuple specifying a source, destination\n weight name mapping.\n freeze: Freeze the loaded weights.\n ' if torch.cuda.is_available(): device = 'cuda' else: device = 'cpu' state_dict = torch.load(state_dict_path, map_location=torch.device(device)) if (layer_names is not None): state_dict = extract_layers_from_state_dict(state_dict, layer_names=layer_names) model.load_state_dict(state_dict, strict=(False if (layer_names is not None) else True)) logger.info("Loaded initial model weights for layer(s) {} from '{}'.".format(layer_names, state_dict_path)) if freeze: layers = [] for layer_name in layer_names: if (type(layer_name) == tuple): layers.append(dict(model.named_children())[layer_name[1]]) else: layers.append(dict(model.named_children())[layer_name]) for layer in layers: for param in layer.parameters(): param.requires_grad = False logger.info('Froze layer(s) {}.'.format([(ln[0] if (type(ln) == tuple) else ln) for ln in layer_names])) return model
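A hedged usage sketch of transfer loading, mirroring the supervised script above (the checkpoint path here is a hypothetical placeholder): only conv1 is loaded from a Hebbian-pretrained state dict and then frozen before fine-tuning:

# 'output/models/heb-checkpoint.pth' is a hypothetical path.
model = models.create_conv1_model(28, 1, num_kernels=400, n=1, batch_norm=True)
model = load_weights(model, 'output/models/heb-checkpoint.pth', layer_names=['conv1'], freeze=True)
# conv1.weight now comes from the checkpoint and has requires_grad == False.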
def extract_layers_from_state_dict(state_dict: dict, layer_names: List[str]): 'Extract layers from a state dict.' new_state_dict = {} for layer_name in layer_names: if (type(layer_name) == tuple): old_layer_name = layer_name[0] new_layer_name = layer_name[1] else: old_layer_name = new_layer_name = layer_name old_layer_name = '{}.weight'.format(old_layer_name) new_layer_name = '{}.weight'.format(new_layer_name) new_state_dict[new_layer_name] = state_dict[old_layer_name] return new_state_dict