---
license: unknown
---
it's like [imagenet.int8](https://huggingface.co/datasets/cloneofsimo/imagenet.int8) but
* train+val in1k (1,331,168 samples)
* flux-dev vae; latent channels (after dequant) are scaled to N(0,1)
* quantization uses int8, not uint8, with scaling factor 127/4 (see the sketch below)
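Dequantization is just a division by 127/4 = 31.75. Below is a minimal sketch of the assumed round trip (normalize to N(0,1), quantize to int8, dequantize); the encode script itself is not reproduced on this card, so the exact rounding/clipping details are an assumption:
```python
import torch

SCALE = 127 / 4  # = 31.75, the scaling factor mentioned above

def quantize_int8(x_norm: torch.Tensor) -> torch.Tensor:
    # normalized (~N(0,1)) latent -> int8, clipped to the int8 range
    return torch.clamp(torch.round(x_norm * SCALE), -127, 127).to(torch.int8)

def dequantize_int8(x_int8: torch.Tensor) -> torch.Tensor:
    # int8 -> approximately the original normalized latent
    return x_int8.float() / SCALE

x = torch.randn(16, 32, 32)  # stand-in for one normalized flux-dev latent
err = (dequantize_int8(quantize_int8(x)) - x).abs().max().item()
print(f"max round-trip error: {err:.4f}")  # ~1/(2*31.75) ≈ 0.016 for in-range values; clipped outliers are worse
```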
basic decode test:
```python
# huggingface-cli download --repo-type dataset main-horse/in1k.int8 --revision flux-1.0-dev --local-dir ./imagenet_int8
import torch
from streaming import StreamingDataset
import streaming.base.util as util
from diffusers import AutoencoderKL
from diffusers.image_processor import VaeImageProcessor
# Per-channel statistics used for the N(0,1) normalization
CHANNEL_MEANS = torch.tensor([
    -0.008, -1.337, 0.335, -0.077, -0.134, 0.320, -1.196, 0.545,
    -0.159, 0.284, 0.584, 0.062, -0.319, 0.001, -0.859, -0.246
], device='cuda')
CHANNEL_STDS = torch.tensor([
    1.996, 3.531, 2.036, 1.428, 1.510, 1.710, 3.108, 2.410,
    1.810, 2.670, 1.711, 1.941, 2.648, 2.734, 2.250, 2.479
], device='cuda')

def unnormalize_latents(x: torch.Tensor) -> torch.Tensor:
    """Undo the per-channel N(0,1) normalization."""
    return x * CHANNEL_STDS[:, None] + CHANNEL_MEANS[:, None]
# Clean up any stale shared memory
util.clean_stale_shared_memory()
# Load the int8 dataset
remote_dir = "./imagenet_int8"
local_dir = "./local_test_dir2"
dataset = StreamingDataset(
    local=local_dir,
    remote=remote_dir,
    split=None,
    shuffle=False,
    batch_size=32
)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, num_workers=0)
# Grab the first batch and pick the sample at index 5
batch = next(iter(dataloader))
i = 5
# Convert int8 latent back to float and reshape
latent = batch['latent'][i].reshape(16, 1024).cuda().float()
label = batch['label'][i].item()
print(f"Processing sample {i} with label {label}")
print(f"Latent shape before processing: {latent.shape}")
# First undo the int8 quantization by scaling (127/4 = 31.75)
latent = latent / 31.75  # should now be ~N(0,1)
# Then undo the normalization to get back to original distribution
latent = unnormalize_latents(latent)
# Reshape to VAE expected format (1, 16, 32, 32)
latent = latent.reshape(1, 16, 32, 32)
print(f"Final latent shape: {latent.shape}")
print(f"Latent stats after denorm: min={latent.min().item():.3f}, max={latent.max().item():.3f}, mean={latent.mean().item():.3f}, std={latent.std().item():.3f}")
# Load and set up VAE
vae = AutoencoderKL.from_pretrained('black-forest-labs/FLUX.1-dev', subfolder='vae', device_map=0, attn_implementation='sdpa')
processor = VaeImageProcessor(vae_scale_factor=2 ** (len(vae.config.block_out_channels) - 1))
# Decode and save
with torch.no_grad():
    decoded = vae.decode(latent).sample
img = processor.postprocess(decoded, do_denormalize=[True, True])[0]
img.save("5th_image_from_int8.png")
print("Saved decoded image as 5th_image_from_int8.png")
```
You should get something like this:

open an issue if you see any technical problems (license-related issues not welcome)