#!/usr/bin/env python3
"""Calculates the Frechet Inception Distance (FID) to evaluate GANs.

The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.

When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).

The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.

See --help for further details.

Code adapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow.

Copyright 2018 Institute of Bioinformatics, JKU Linz

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

import numpy as np
import torch
import torch.utils.data
import torchvision.transforms as transforms
import tqdm
from PIL import Image
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
from torch.utils import data

from eval_utils.inceptionV3 import InceptionV3

class Dataset(data.Dataset):
    """Characterizes an image-folder dataset for PyTorch."""

    def __init__(self, path, transform=None):
        self.file_names = self.get_filenames(path)
        self.transform = transform

    def __len__(self):
        """Denotes the total number of samples."""
        return len(self.file_names)

    def __getitem__(self, index):
        """Generates one sample of data."""
        img = Image.open(self.file_names[index]).convert('RGB')
        # Convert the image to a torch tensor (and apply any other transforms)
        if self.transform is not None:
            img = self.transform(img)
        return img

    def get_filenames(self, data_path):
        """Recursively collects all PNG/JPEG files under data_path."""
        images = []
        for path, _subdirs, files in os.walk(data_path):
            for name in files:
                # Match on the file extension rather than a substring search,
                # which would also accept names that merely contain 'jpg'/'png'.
                if name.lower().endswith(('.jpg', '.jpeg', '.png')):
                    filename = os.path.join(path, name)
                    if os.path.isfile(filename):
                        images.append(filename)
        return images

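# A minimal usage sketch (the paths are illustrative); this mirrors how the
# dataset is consumed in _compute_statistics_of_path below:
#
#     ds = Dataset('/data/images', transforms.ToTensor())
#     loader = data.DataLoader(ds, batch_size=64, shuffle=False, drop_last=True)
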
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--batch-size', type=int, default=64,
                    help='Batch size to use')
parser.add_argument('--dims', type=int, default=2048,
                    choices=list(InceptionV3.BLOCK_INDEX_BY_DIM),
                    help=('Dimensionality of Inception features to use. '
                          'By default, uses pool3 features'))
parser.add_argument('-c', '--gpu', default='', type=str,
                    help='GPU to use (leave blank for CPU only)')
parser.add_argument('--path1', type=str, help='path to images')
parser.add_argument('--path2', type=str, help='path to images')

def get_activations(images, model, batch_size=64, dims=2048, cuda=False,
                    verbose=True):
    """Calculates the activations of the pool_3 layer for all images.

    Params:
    -- images      : DataLoader yielding batches of image tensors of shape
                     (batch_size, 3, hi, wi); values must lie between 0 and 1.
    -- model       : Instance of inception model
    -- batch_size  : Batch size of the DataLoader. A reasonable batch size
                     depends on the hardware.
    -- dims        : Dimensionality of features returned by Inception
    -- cuda        : If set to True, use GPU
    -- verbose     : If set to True, report progress

    Returns:
    -- A numpy array of dimension (num images, dims) that contains the
       activations of the given tensor when feeding inception with the
       query tensor.
    """
    model.eval()

    # Each batch contains exactly batch_size images because the DataLoader
    # is created with drop_last=True.
    d0 = len(images) * batch_size
    if batch_size > d0:
        print('Warning: batch size is bigger than the data size. '
              'Setting batch size to data size')
        batch_size = d0

    n_batches = d0 // batch_size
    n_used_imgs = n_batches * batch_size
    pred_arr = np.empty((n_used_imgs, dims))

    for i, batch in tqdm.tqdm(enumerate(images), total=n_batches,
                              disable=not verbose):
        start = i * batch_size
        end = start + batch_size
        if cuda:
            batch = batch.cuda()
        with torch.no_grad():
            pred = model(batch)[0]
        # If model output is not scalar, apply global spatial average pooling.
        # This happens if you choose a dimensionality not equal 2048.
        if pred.shape[2] != 1 or pred.shape[3] != 1:
            pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
        pred_arr[start:end] = pred.cpu().numpy().reshape(batch_size, -1)

    if verbose:
        print(' done')
    return pred_arr

def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is

        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1    : Numpy array containing the mean of the activations of a layer
                of the inception net (like returned by the function
                'get_activations') for generated samples.
    -- mu2    : The sample mean over activations, precalculated on a
                representative data set.
    -- sigma1 : The covariance matrix over activations for generated samples.
    -- sigma2 : The covariance matrix over activations, precalculated on a
                representative data set.

    Returns:
    --        : The Frechet Distance.
    """
    mu1 = np.atleast_1d(mu1)
    mu2 = np.atleast_1d(mu2)
    sigma1 = np.atleast_2d(sigma1)
    sigma2 = np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    diff = mu1 - mu2

    # Product might be almost singular
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        msg = ('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps
        print(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # Numerical error might give slight imaginary component
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError('Imaginary component {}'.format(m))
        covmean = covmean.real

    tr_covmean = np.trace(covmean)

    return (diff.dot(diff) + np.trace(sigma1) +
            np.trace(sigma2) - 2 * tr_covmean)

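# Sanity check (a minimal illustrative example, not part of the script): the
# distance between a distribution and itself is zero, since diff vanishes and
# the trace terms cancel when C_1 = C_2:
#
#     mu, sigma = np.zeros(8), np.eye(8)
#     assert calculate_frechet_distance(mu, sigma, mu, sigma) < 1e-6
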
def calculate_activation_statistics(images, model, batch_size=64,
                                    dims=2048, cuda=False, verbose=True):
    """Calculation of the statistics used by the FID.

    Params:
    -- images      : DataLoader yielding batches of image tensors of shape
                     (batch_size, 3, hi, wi); values must lie between 0 and 1.
    -- model       : Instance of inception model
    -- batch_size  : Batch size of the DataLoader. A reasonable batch size
                     depends on the hardware.
    -- dims        : Dimensionality of features returned by Inception
    -- cuda        : If set to True, use GPU
    -- verbose     : If set to True, report progress

    Returns:
    -- mu    : The mean over samples of the activations of the pool_3 layer of
               the inception model, of shape (dims,).
    -- sigma : The covariance matrix of the activations of the pool_3 layer of
               the inception model, of shape (dims, dims).
    """
    act = get_activations(images, model, batch_size, dims, cuda, verbose)
    mu = np.mean(act, axis=0)
    sigma = np.cov(act, rowvar=False)
    return mu, sigma

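# Precomputed statistics can be cached for reuse (a sketch; the variable names
# and file name are illustrative) in the .npz format that
# _compute_statistics_of_path expects:
#
#     mu, sigma = calculate_activation_statistics(loader, model)
#     np.savez('real_stats.npz', mu=mu, sigma=sigma)
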
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
    if path.endswith('.npz'):
        # Precomputed statistics: load mu and sigma directly.
        f = np.load(path)
        m, s = f['mu'][:], f['sigma'][:]
        f.close()
    else:
        dataset = Dataset(path, transforms.Compose([
            transforms.Resize((299, 299)),
            transforms.ToTensor(),
        ]))
        print('Found {} images at {}'.format(len(dataset), path))
        if len(dataset) < batch_size:
            batch_size = 1
        dataloader = torch.utils.data.DataLoader(dataset=dataset,
                                                 batch_size=batch_size,
                                                 shuffle=False, drop_last=True,
                                                 num_workers=0)
        m, s = calculate_activation_statistics(dataloader, model, batch_size,
                                               dims, cuda)
    return m, s

def calculate_fid_given_paths(paths, batch_size, cuda, dims):
    """Calculates the FID of two paths."""
    for p in paths:
        if not os.path.exists(p):
            raise RuntimeError('Invalid path: %s' % p)

    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
    model = InceptionV3([block_idx])
    if cuda:
        model.cuda()

    m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size, dims,
                                         cuda)
    m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size, dims,
                                         cuda)
    fid_value = calculate_frechet_distance(m1, s1, m2, s2)
    return fid_value

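# Programmatic usage (a sketch; the paths are illustrative):
#
#     fid = calculate_fid_given_paths(['/data/real', '/data/fake'],
#                                     batch_size=64, cuda=True, dims=2048)
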
if __name__ == '__main__':
    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    paths = [args.path1, args.path2]
    print(paths)
    # An empty --gpu string means CPU-only; pass an explicit boolean rather
    # than relying on the truthiness of the raw string.
    fid_value = calculate_fid_given_paths(paths, args.batch_size,
                                          args.gpu != '', args.dims)
    print('FID: ', fid_value)