import torch
import torch.nn as nn
from torchvision import transforms
from PIL import Image


class Generator(nn.Module):
    """DCGAN-style generator: maps a (N, 128, 1, 1) latent tensor to an RGB image."""

    def __init__(self):
        super(Generator, self).__init__()
        self.main = nn.Sequential(
            # (N, 128, 1, 1) -> (N, 512, 4, 4)
            nn.ConvTranspose2d(128, 64 * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(64 * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # (N, 512, 4, 4) -> (N, 256, 8, 8)
            nn.ConvTranspose2d(64 * 8, 64 * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64 * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # (N, 256, 8, 8) -> (N, 128, 16, 16)
            nn.ConvTranspose2d(64 * 4, 64 * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64 * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # (N, 128, 16, 16) -> (N, 64, 32, 32)
            nn.ConvTranspose2d(64 * 2, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2, inplace=True),
            # (N, 64, 32, 32) -> (N, 3, 64, 64), values in [-1, 1] via Tanh
            nn.ConvTranspose2d(64, 3, 4, 2, 1, bias=False),
            nn.Tanh()
        )

    def forward(self, input):
        return self.main(input)
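
# A quick shape sanity check (a sketch based on the layer strides above, not on
# any trained checkpoint): a (1, 128, 1, 1) latent tensor maps to a
# (1, 3, 64, 64) image tensor in [-1, 1].
#
#     g = Generator()
#     out = g(torch.randn(1, 128, 1, 1))
#     print(out.shape)  # torch.Size([1, 3, 64, 64])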


class PretrainedPipeline:
    def __init__(self):
        self.device = torch.device("cpu")
        # Instantiate the GAN generator and load the trained weights.
        self.generator = Generator()
        self.generator.load_state_dict(torch.load("generator.pth", map_location=self.device))
        self.generator.eval()

    def generate_image(self):
        with torch.no_grad():
            # Latent size must match the generator's first layer (128 input channels).
            noise = torch.randn(1, 128, 1, 1).to(self.device)
            generated_image_tensor = self.generator(noise)
            generated_image = generated_image_tensor.squeeze(0).cpu()
            # The generator's Tanh output is in [-1, 1]; rescale to [0, 1] for ToPILImage.
            generated_image = (generated_image + 1) / 2.0
            pil_image = transforms.ToPILImage()(generated_image)
            return pil_image
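

# A minimal usage sketch, assuming a trained checkpoint named "generator.pth"
# sits next to this file (the filename comes from __init__ above; adjust as
# needed). Running the module directly generates one sample and saves it.
if __name__ == "__main__":
    pipeline = PretrainedPipeline()
    image = pipeline.generate_image()
    image.save("sample.png")  # "sample.png" is an illustrative output path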