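"""Dump discrete VQGAN (GumbelVQ) image codes for a dataset of images.

Each input row is read with FileDataset, the image is resized to the
configured code image size, encoded with a frozen GumbelVQ model, and the
flattened code sequence is written to a tab-separated output file.
"""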
import argparse
import base64
import os
import sys
from io import BytesIO

import numpy as np
import torch
from omegaconf import OmegaConf
from PIL import Image, ImageFile
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms

sys.path.append('../../')
from data.file_dataset import FileDataset
from models.taming.models.vqgan import GumbelVQ

# Tolerate truncated inputs and disable PIL's decompression-bomb size checks.
ImageFile.LOAD_TRUNCATED_IMAGES = True
ImageFile.MAX_IMAGE_PIXELS = None
Image.MAX_IMAGE_PIXELS = None
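

# Rows from FileDataset carry either 4 columns (pair_id, image_id, image,
# text) or 2 columns (image_id, image); `image` is a base64 payload or a
# file path, depending on --skip_convert_images.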
class VQGANDataset(Dataset):
    """Yields image tensors preprocessed for the VQGAN encoder."""

    def __init__(self, file, selected_cols, skip_convert_images=True, image_root=None):
        self.reader = FileDataset(
            file,
            selected_col_ids=selected_cols,
        )
        self.skip_convert_images = skip_convert_images
        self.image_root = image_root
        # `args` is the module-level namespace parsed under __main__.
        if self.skip_convert_images:
            # Path-based rows: resize to a fixed square of side code_image_size.
            self.code_resize_transform = transforms.Compose([
                lambda image: image.convert("RGB"),
                transforms.Resize((args.code_image_size, args.code_image_size), interpolation=Image.BICUBIC),
                transforms.ToTensor(),
                preprocess_vqgan,
            ])
        else:
            # Base64-encoded rows: resize the shorter side to code_image_size.
            self.code_resize_transform = transforms.Compose([
                lambda image: image.convert("RGB"),
                transforms.Resize(args.code_image_size, interpolation=Image.LANCZOS),
                transforms.ToTensor(),
                preprocess_vqgan,
            ])
    def __len__(self):
        return len(self.reader)

    def __getitem__(self, item):
        column_l = self.reader[item]
        if len(column_l) == 4:
            pair_id, image_id, image, text = column_l
        elif len(column_l) == 2:
            image_id, image = column_l
        else:
            raise NotImplementedError
        if not self.skip_convert_images:
            # Decode the base64-encoded image payload.
            image = Image.open(BytesIO(base64.urlsafe_b64decode(image)))
        else:
            # `image` holds a file path, optionally relative to image_root.
            if self.image_root is not None:
                image = os.path.join(self.image_root, image)
            image = Image.open(image)
        code_image = self.code_resize_transform(image)
        if len(column_l) == 4:
            return {"code_image": code_image, "pair_id": pair_id, "image_id": image_id, "text": text}
        elif len(column_l) == 2:
            return {"code_image": code_image, "image_id": image_id}


def custom_to_pil(x):
    """Map a [-1, 1] CHW tensor back to an RGB PIL image."""
    x = x.detach().cpu()
    x = torch.clamp(x, -1., 1.)
    x = (x + 1.) / 2.
    x = x.permute(1, 2, 0).numpy()
    x = (255 * x).astype(np.uint8)
    x = Image.fromarray(x)
    if not x.mode == "RGB":
        x = x.convert("RGB")
    return x


def map_pixels(x, eps=0.1):
    """Squeeze [0, 1] pixel values into [eps, 1 - eps]."""
    return (1 - 2 * eps) * x + eps


def preprocess_vqgan(x):
    """Map [0, 1] pixel values to the [-1, 1] range the VQGAN expects."""
    x = 2. * x - 1.
    return x
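
# Illustrative round trip: preprocess_vqgan followed by custom_to_pil should
# reproduce a ToTensor() image up to uint8 quantization, e.g.
#   custom_to_pil(preprocess_vqgan(transforms.ToTensor()(img)))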


def image_to_base64(img, format):
    """Serialize a PIL image to a base64 string in the given format."""
    output_buffer = BytesIO()
    img.save(output_buffer, format=format)
    byte_data = output_buffer.getvalue()
    base64_str = base64.b64encode(byte_data)
    base64_str = str(base64_str, encoding='utf-8')
    return base64_str
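

# Example invocation (script and file names are illustrative):
#   python generate_vqgan_codes.py \
#       --file dataset.tsv --outputs dataset_codes.tsv \
#       --selected_cols 0,1 --code_image_size 256 --vq_model vqgan \
#       --vqgan_model_path vqgan.ckpt --vqgan_config_path vqgan.yaml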
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--file", type=str, default="")
    parser.add_argument("--outputs", type=str, default="")
    parser.add_argument("--selected_cols", type=str, required=True)
    parser.add_argument("--code_image_size", type=int, required=True)
    # --vq_model is accepted but not referenced below.
    parser.add_argument("--vq_model", type=str, required=True)
    parser.add_argument("--vqgan_model_path", type=str, default=None)
    parser.add_argument("--vqgan_config_path", type=str, default=None)
    parser.add_argument("--log_interval", default=100, type=int, help="log interval")
    parser.add_argument("--worker_cnt", type=int, default=1)
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--batch_size", type=int, default=32)
    # argparse's `type=bool` treats any non-empty string (even "False") as
    # True, so expose this as a store_true flag instead.
    parser.add_argument("--skip_convert_images", action="store_true", default=False)
    parser.add_argument("--image_root", type=str, default=None)
    args = parser.parse_args()

    # Build the GumbelVQ image tokenizer from its config and checkpoint,
    # then freeze it for inference.
    vqgan_config = OmegaConf.load(args.vqgan_config_path)
    vqgan = GumbelVQ(**vqgan_config.model.params)
    sd = torch.load(args.vqgan_model_path, map_location="cpu")["state_dict"]
    missing, unexpected = vqgan.load_state_dict(sd, strict=False)
    for k, v in vqgan.named_parameters():
        v.requires_grad = False
    image_tokenizer = vqgan.cuda().eval()

    writer = open(args.outputs, 'w')
    print("begin process")
    data_cnt = 0
    dataset = VQGANDataset(args.file, args.selected_cols,
                           skip_convert_images=args.skip_convert_images,
                           image_root=args.image_root)
    dataloader = DataLoader(dataset, batch_size=args.batch_size)
    for data in dataloader:
        batch_size = data["code_image"].size()[0]
        with torch.no_grad():
            # encode() returns (quantized latents, loss, info); the last
            # element of info holds the discrete code indices.
            z, _, [_, _, image_codes] = image_tokenizer.encode(data["code_image"].cuda())
            image_codes = image_codes.view(batch_size, -1).detach()
        for i, image_code in enumerate(image_codes):
            # One output row per image: tab-separated ids (and text), then
            # the space-separated code sequence.
            code = ' '.join([str(num) for num in image_code.tolist()])
            if len(data.keys()) == 4:
                writer.write('\t'.join([data['pair_id'][i], data['image_id'][i], data['text'][i], code]) + '\n')
            elif len(data.keys()) == 2:
                writer.write('\t'.join([data['image_id'][i], code]) + '\n')
            else:
                raise NotImplementedError
    writer.close()
    print("finish")