

import os
import onnxruntime
import torch
import numpy as np
import torch.nn.functional as F
import matplotlib.pyplot as plt



# Paths to the pretrained tagger (ONNX) and StyleGAN2 generator checkpoints.
input_path = 'checkpoint'
tags_path = os.path.join(input_path, 'tags.txt')
model_path = os.path.join(input_path, 'model.onnx')
generator_path = os.path.join(input_path, 'Gs.pth')
# Fixed: original line read `device =  device = torch.device(...)` — a
# duplicated (harmless but confusing) copy-paste assignment.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
batch_size = 4  # latents generated per iteration in the main loop below
seed = 0        # RNG seed for reproducible latent sampling



# Load the ONNX image tagger; a single-image smoke test below checks it works.
# `C` is queried for its input name and run per-image throughout the script.
C = onnxruntime.InferenceSession(model_path)



# Read the tag vocabulary: one tag per line, blanks skipped.
# Index i of `tags` corresponds to output unit i of the ONNX tagger.
# Explicit UTF-8: without it, decoding depends on the platform locale.
with open(tags_path, 'r', encoding='utf-8') as tags_stream:
    tags = np.array([tag for tag in (tag.strip() for tag in tags_stream) if tag])



import stylegan2
from stylegan2 import utils

# Load the pretrained StyleGAN2 generator checkpoint and move it to `device`.
# NOTE(review): `stylegan2` is a project-local package; `models.load` is
# presumably a torch checkpoint loader — confirm against the package source.
G = stylegan2.models.load(generator_path, map_location=device)
G.to(device)



def to_image_tensor(image_tensor, pixel_min=-1, pixel_max=1):
    """Rescale a tensor from [pixel_min, pixel_max] into [0, 1] and clamp.

    When the source range is already [0, 1] the rescale is skipped; values
    outside the source range are clipped after rescaling.
    """
    already_unit_range = pixel_min == 0 and pixel_max == 1
    if not already_unit_range:
        span = pixel_max - pixel_min
        image_tensor = (image_tensor - pixel_min) / span
    return image_tensor.clamp(min=0, max=1)



# Smoke test: generate one image, run it through the tagger, and show it
# alongside the tags that score above 0.5.
torch.manual_seed(seed)

qlatents = torch.randn(1, G.latent_size).to(device=device, dtype=torch.float32)
images = to_image_tensor(G(qlatents))
# The tagger is an Inception-style network and expects 299x299 inputs.
images = F.interpolate(images, size=(299, 299), mode='bilinear')
input_name = C.get_inputs()[0].name
[predicted_labels] = C.run(None, {input_name: images.detach().cpu().numpy()})
plt.imshow(images[0].detach().cpu().permute(1, 2, 0))
# Collect the tags whose predicted score crosses the 0.5 threshold.
labels = []
for i, score in enumerate(predicted_labels[0]):
    if score > 0.5:
        labels.append(tags[i])
print(labels)



# Reset the seed so the dataset below is reproducible independently of the
# smoke test above.
torch.manual_seed(seed)
iteration = 5000

progress = utils.ProgressWriter(iteration)
progress.write('Generating images...', step=False)

# Accumulate per-batch results in Python lists and concatenate once at the
# end: the original re-ran torch.cat on a growing tensor every iteration,
# which copies all previous data each time (O(n^2) total over 5000 steps).
qlatent_batches = []
dlatent_batches = []
label_batches = []
# Hoist the ONNX input name lookup out of both loops — it never changes.
input_name = C.get_inputs()[0].name
for i in range(iteration):
    qlatents = torch.randn(batch_size, G.latent_size).to(device=device, dtype=torch.float32)
    with torch.no_grad():
        generated, dlatents = G(latents=qlatents, return_dlatents=True)
        generated = to_image_tensor(generated)
        # The tagger expects 299x299 inputs, so resize before inference.
        images = F.interpolate(generated, size=(299, 299), mode='bilinear')
        # The ONNX tagger does not accept batched input; feed one image at a time.
        scores = []
        for image in images:
            ort_inputs = {input_name: image.reshape(1, 3, 299, 299).detach().cpu().numpy()}
            [[predicted_labels]] = C.run(None, ort_inputs)
            scores.append(predicted_labels)
        # np.stack + as_tensor avoids the slow per-element conversion that
        # torch.Tensor(list_of_numpy_arrays) performs.
        labels_tensor = torch.as_tensor(np.stack(scores), dtype=torch.float32, device=device)
        qlatent_batches.append(qlatents)
        dlatent_batches.append(dlatents)
        label_batches.append(labels_tensor)

        progress.step()

# Single concatenation; same shapes/values as the original incremental cat.
qlatents_data = torch.cat(qlatent_batches)
dlatents_data = torch.cat(dlatent_batches)
labels_data = torch.cat(label_batches)

progress.write('Done!', step=False)
progress.close()



# Persist the collected latents and tagger scores for downstream use.
# Tensors are moved to CPU so the file loads on machines without a GPU.
payload = {
    'qlatents_data': qlatents_data.cpu(),
    'dlatents_data': dlatents_data.cpu(),
    'labels_data': labels_data.cpu(),
    'tags': tags,
}
torch.save(payload, 'latents.pth')




