David Piscasio committed
Commit 195fb1b
Parent: efb4d80

Upload app.py

Files changed (1)
  1. app.py +81 -0
app.py ADDED
@@ -0,0 +1,81 @@
+from options.test_options import TestOptions
+from models import create_model
+import torch
+import numpy as np
+import gradio as gr
+from einops import rearrange
+import torchvision
+import torchvision.transforms as transforms
+
+def tensor2im(input_image, imtype=np.uint8):
+    if not isinstance(input_image, np.ndarray):
+        if isinstance(input_image, torch.Tensor):  # get the data from a variable
+            image_tensor = input_image.data
+        else:
+            return input_image
+        image_numpy = image_tensor[0].cpu().float().numpy()  # convert it into a numpy array
+        if image_numpy.shape[0] == 1:  # grayscale to RGB
+            image_numpy = np.tile(image_numpy, (3, 1, 1))
+        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
+    else:  # if it is a numpy array, do nothing
+        image_numpy = input_image
+    return image_numpy.astype(imtype)
+
+def get_model(translation):  # map the dropdown label to the pretrained CycleGAN checkpoint name
+    if translation == 'Orange to Apple':
+        return 'orange2apple'
+    elif translation == 'Horse to Zebra':
+        return 'horse2zebra'
+    elif translation == 'Image to Van Gogh':
+        return 'style_vangogh'
+    elif translation == 'Image to Monet':
+        return 'style_monet'
+
+def unpaired_img2img(translation, image):
+    opt = TestOptions().parse()  # default test-time options
+    m_name = get_model(translation)
+    opt.name = m_name + '_pretrained'
+    opt.model = 'test'  # single-direction test model
+    opt.no_dropout = True
+    opt.num_threads = 0
+    opt.batch_size = 1
+    opt.no_flip = True
+    model = create_model(opt)
+    model.setup(opt)
+    model.eval()
+
+    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # ImageNet statistics
+    image = torch.from_numpy(image)  # convert image from numpy to PyTorch tensor
+    image = rearrange(image, "h w c -> c h w")  # since PyTorch is channel first
+
+    # Perform the necessary image transforms
+    image = transforms.Resize(256)(image)
+    image = transforms.CenterCrop(256)(image).float() / 255.
+    image = normalize(image)
+
+    image = rearrange(image, "c h w -> 1 c h w")  # insert batch size of 1 (as required by our model)
+
+    model.set_input(image)
+    model.test()
+    visuals = model.get_current_visuals()  # get image results
+    for i in visuals.values():
+        im_data = i  # keep the last visual, i.e. the translated image
+    im = tensor2im(im_data)
+    return im
+
+gr.Interface(fn=unpaired_img2img,
+             inputs=[gr.inputs.Dropdown(['Horse to Zebra', 'Orange to Apple', 'Image to Van Gogh', 'Image to Monet']),
+                     gr.inputs.Image(shape=(256, 256))],
+             outputs=gr.outputs.Image(type="numpy"),
+             title="Unpaired Image to Image Translation",
+             examples=[['Horse to Zebra', "data/horse1.jpg"],
+                       ['Horse to Zebra', "data/horse3.jpg"],
+                       ['Orange to Apple', "data/orange1.jpg"],
+                       ['Orange to Apple', "data/orange2.jpg"],
+                       ['Image to Van Gogh', "data/img1.jpg"],
+                       ['Image to Van Gogh', "data/img2.jpg"],
+                       ['Image to Monet', "data/img1.jpg"],
+                       ['Image to Monet', "data/img2.jpg"]],
+             description="This is a PyTorch implementation of unpaired image-to-image translation using a pretrained CycleGAN model. First select the type of translation you wish to see from the dropdown menu, then upload the image you wish to translate and click the 'Submit' button.",
+             article="To learn more about unpaired image-to-image translation and CycleGAN, see the <a href='https://paperswithcode.com/paper/unpaired-image-to-image-translation-using'>Papers with Code</a> page.",
+             allow_flagging="never").launch(inbrowser=True)
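A minimal sketch for calling unpaired_img2img directly during local debugging, assuming the Space's copy of the CycleGAN code allows TestOptions().parse() to run without extra command-line arguments (just as app.py assumes at request time), that the bundled example image data/horse1.jpg is present, and that the function is in scope; Pillow is used here only to load and save files:

import numpy as np
from PIL import Image

# Load an example image as an HWC uint8 array, the same format Gradio passes to the function
img = np.array(Image.open("data/horse1.jpg").convert("RGB"))

# Run the horse -> zebra translation; the function returns a uint8 numpy image
out = unpaired_img2img("Horse to Zebra", img)
Image.fromarray(out).save("horse1_zebra.png")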