# NOTE: The original file carried six lines of web-page scrape residue here
# ("Spaces:", a "Runtime error" banner, file size, commit hash 1b2a9b1, and a
# line-number gutter). They are not Python and would prevent the file from
# parsing, so they are preserved only as this comment.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('./')
import time
from swapae.options import TestOptions
import swapae.models as models
from swapae.evaluation import GroupEvaluator
import swapae.data as data
import torchvision.utils as vutils
import torch
from PIL import Image
import torchvision.transforms as transforms
# Swap inference: encode a "structure" image and a "style" image, decode the
# swapped combination, and write the result to rec.png.

# Hard-coded demo inputs; adjust to local paths as needed.
STRUCTURE_PATH = '/home/xtli/Dropbox/swapping-autoencoder-pytorch/testphotos/ffhq512/fig9/structure/12000.png'
STYLE_PATH = '/home/xtli/Dropbox/swapping-autoencoder-pytorch/testphotos/ffhq512/fig9/style/11104.png'
IMAGE_SIZE = 256  # side length fed to the encoder

opt = TestOptions().parse()
model = models.create_model(opt)

_transform = transforms.Compose([
    transforms.Resize(IMAGE_SIZE),
    transforms.CenterCrop(IMAGE_SIZE),
    transforms.ToTensor(),
])


def _load_image(path):
    """Load *path* as a 1x3xHxW tensor scaled to [-1, 1].

    Forces 3-channel RGB so PNGs with an alpha channel (or grayscale
    images) do not break the 3-channel encoder.
    """
    img = Image.open(path).convert('RGB')
    tensor = _transform(img).unsqueeze(0)  # add batch dimension
    return tensor * 2 - 1  # [0, 1] -> [-1, 1], matching model's input range


structure_img = _load_image(STRUCTURE_PATH)
style_img = _load_image(STYLE_PATH)

s_time = time.time()
with torch.no_grad():
    # NOTE(review): "encode" appears to return (structure_code, texture_code);
    # we keep the structure code of one image and the texture code of the
    # other, then decode the swapped pair — confirm against swapae's model API.
    structure_feat = model(structure_img, command="encode")[0]
    style_feat = model(style_img, command="encode")[1]
    rec = model(structure_feat, style_feat, command="decode")
e_time = time.time()
print(e_time - s_time)  # wall-clock seconds for encode + decode

# Map decoder output from [-1, 1] back to [0, 1]; clamp so stray out-of-range
# values are not mangled by save_image's uint8 conversion.
rec = ((rec + 1) / 2).clamp_(0, 1)
vutils.save_image(rec, 'rec.png')