import torch
import gradio as gr
import numpy as np
import nltk
nltk.download('wordnet')
nltk.download('omw-1.4')
from PIL import Image
from pytorch_pretrained_biggan import (BigGAN, one_hot_from_names,
                                       truncated_noise_sample, save_as_images)

# Default architecture and class; both can also be chosen in the UI below.
initial_archi = 'biggan-deep-128'  # 'biggan-deep-128', 'biggan-deep-256' or 'biggan-deep-512'
initial_class = 'dog'

# Pre-load the default model; other architectures are loaded on demand and cached,
# so the dropdown choice is actually used and weights are only downloaded once.
gan_models = {initial_archi: BigGAN.from_pretrained(initial_archi)}

def get_model(archi):
    if archi not in gan_models:
        gan_models[archi] = BigGAN.from_pretrained(archi)
    return gan_models[archi]

def generate_images(initial_archi, initial_class, batch_size):
    gan_model = get_model(initial_archi)
    truncation = 0.4
    class_vector = one_hot_from_names(initial_class, batch_size=batch_size)
    if class_vector is None:
        raise ValueError(f"Could not find an ImageNet class matching '{initial_class}'")
    noise_vector = truncated_noise_sample(truncation=truncation, batch_size=batch_size)

    # All in tensors
    noise_vector = torch.from_numpy(noise_vector)
    class_vector = torch.from_numpy(class_vector)

    # If you have a GPU, put everything on cuda
    #noise_vector = noise_vector.to('cuda')
    #class_vector = class_vector.to('cuda')
    #gan_model.to('cuda')

    # Generate images
    with torch.no_grad():
        output = gan_model(noise_vector, class_vector, truncation)

    # If you have a GPU, put the output back on CPU before converting to images
    output = output.to('cpu')
    save_as_images(output)
    return output
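# Note: `output` is a float tensor of shape (batch_size, 3, H, W) with values in
# [-1, 1]; H and W are 128/256/512 depending on the chosen architecture.
# `save_as_images(output)` above writes the generated images to disk as a side
# effect; `convert_to_images` below turns the same tensor into PIL images.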
    
def convert_to_images(obj):
    """ Convert an output tensor from BigGAN into a list of images.
        Params:
            obj: tensor or numpy array of shape (batch_size, channels, height, width)
        Output:
            list of Pillow Images of size (height, width)
    """
    if not isinstance(obj, np.ndarray):
        obj = obj.detach().numpy()

    # Channels-first -> channels-last, then rescale from [-1, 1] to [0, 255]
    obj = obj.transpose((0, 2, 3, 1))
    obj = np.clip(((obj + 1) / 2.0) * 256, 0, 255)

    img = []
    for out in obj:
        img.append(Image.fromarray(out.astype(np.uint8)))
    return img
    
def inference(initial_archi, initial_class):
    """Generate one image for the chosen architecture and class, returned as a PIL image."""
    output = generate_images(initial_archi, initial_class, 1)
    PIL_output = convert_to_images(output)
    return PIL_output[0]
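# Quick sanity check outside the Gradio UI (a minimal sketch; the file name is
# arbitrary and the first call downloads the model weights):
#   img = inference("biggan-deep-128", "dog")
#   img.save("dog.png")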
  


title = "BigGAN"
description = "Generate class-conditional images with BigGAN: pick a model architecture and type an ImageNet class name (e.g. 'dog')."
article = "Coming soon"

examples = [
    ["biggan-deep-128", "dog"],
    ["biggan-deep-256", "dog"],
    ["biggan-deep-512", "dog"]
]

gr.Interface(inference,
             inputs=[gr.inputs.Dropdown(["biggan-deep-128", "biggan-deep-256", "biggan-deep-512"], label="architecture"),
                     gr.inputs.Textbox(label="class name")],
             outputs=[gr.outputs.Image(type="pil", label="output")],
             examples=examples,
             title=title,
             description=description,
             article=article).launch(debug=True)