import os
import torch
import torchvision
import torch.nn as nn
from torchvision import transforms
from torchvision.utils import save_image

'''
Once GAN training is finished, the generator (G) and discriminator (D) are two
fully independent models and can each be used for inference on their own.
For image generation we typically only need the generator, not the discriminator.
'''
# Hyper-parameters (must match the values used when the generator was trained)
latent_size = 64        # dimensionality of the noise vector fed to the generator
hidden_size = 256       # width of the generator's hidden layers
image_size = 784        # flattened 28x28 grayscale image
num_epochs = 200        # appears unused in this inference script; likely kept from training code
batch_size = 100        # number of images sampled in one forward pass
sample_dir = 'samples'  # output directory for the generated image grid

# Device configuration: prefer GPU when available, fall back to CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Path to the trained generator's state_dict checkpoint
model_path = "G.ckpt"

def denorm(x):
    """Map pixel values from the generator's Tanh range [-1, 1] back to [0, 1].

    Values outside [-1, 1] are clamped so the result is a valid image tensor.
    """
    return torch.clamp((x + 1) * 0.5, min=0, max=1)
# ================================================================== #
# Why pixel values lie in (-1, 1):
# transforms.ToTensor() scales images to (0, 1), and
# transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) then maps
# them to (-1, 1); denorm() above inverts that normalization.
# ================================================================== #

# Generator: maps a latent noise vector (latent_size) to a flattened
# 28x28 image with pixel values in [-1, 1] (Tanh output).
G = nn.Sequential(
    nn.Linear(latent_size, hidden_size),   # 64 -> 256
    nn.ReLU(),
    nn.Linear(hidden_size, hidden_size),   # 256 -> 256
    nn.ReLU(),
    nn.Linear(hidden_size, image_size),    # 256 -> 784
    nn.Tanh())

# Device setting
G = G.to(device)

# map_location remaps tensors in the checkpoint to the current device,
# so a state_dict saved on GPU still loads on a CPU-only machine.
G.load_state_dict(torch.load(model_path, map_location=device))
G.eval()  # inference mode (no-op for this architecture, but good hygiene)

# Ensure the output directory exists; save_image fails otherwise.
os.makedirs(sample_dir, exist_ok=True)

# Sample Gaussian noise shaped to the generator's input and run inference
# without tracking gradients (saves memory; result is unchanged).
with torch.no_grad():
    z = torch.randn(batch_size, latent_size).to(device)
    fake_images = G(z)

# Save sampled images: reshape flat vectors to (N, 1, 28, 28) and
# denormalize from [-1, 1] back to [0, 1] before writing the grid.
fake_images = fake_images.reshape(fake_images.size(0), 1, 28, 28)
save_image(denorm(fake_images), os.path.join(sample_dir, 'inference_fake_images.png'))
