import sys
sys.path.append('.')

import os
from reflow.data.utils import LMDB_ndarray
from reflow.data.dataset import get_reflow_dataset
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import torch
from pathlib import Path
from diffusers import AltDiffusionPipeline
from loguru import logger

if __name__ == "__main__":
    # Decode pre-computed latents from an LMDB reflow dataset back into PNG
    # images with the AltDiffusion VAE, saving the images plus their captions
    # for the selected batch indices only.
    data_root = 'data/test_data_gen'
    shuffle = False
    bs = 4
    # 1-based dataloader batch indices to decode; need not be sorted.
    batch_selected = [1]
    save_dir = Path('samples/free_sample/latent_decode')
    save_dir.mkdir(parents=True, exist_ok=True)
    # Start from a clean captions file since we append one chunk per batch.
    captions_path = save_dir / 'captions.txt'
    captions_path.unlink(missing_ok=True)

    device = 'cuda:1'
    ds = get_reflow_dataset(
        data_root=data_root,
        src_type='lmdb',
    )
    dl = DataLoader(ds, batch_size=bs, shuffle=shuffle)

    dtype = torch.float16
    pipe = AltDiffusionPipeline.from_pretrained(
        "checkpoints/AltDiffusion",
        torch_dtype=dtype,
        safety_checker=None,
        requires_safety_checker=False,
    )
    pipe = pipe.to(device)

    now_idx = 1
    for i, batch in tqdm(enumerate(dl, start=1)):
        if i not in batch_selected:
            continue
        # BUG FIX: the original `batch_selected.pop(0)` dropped the first
        # *remaining* entry regardless of which index matched, corrupting the
        # selection whenever batch_selected is unsorted. Remove the matched
        # index instead.
        batch_selected.remove(i)
        with torch.no_grad():
            latents = batch['latent'].to(device=device, dtype=dtype)
            # latents = batch['noise'].to(device=device, dtype=dtype)

            images = pipe.decode_latents(latents)
            images = pipe.numpy_to_pil(images)
            # Number output files sequentially across all decoded batches.
            for img_idx, img in enumerate(images, start=now_idx):
                img.save(str(save_dir / f'{img_idx}.png'))
            logger.info('save images done')

            # NOTE(review): assumes batch['caption'] is a list of strings —
            # confirm against the dataset implementation.
            captions = batch['caption']
            with open(captions_path, 'a') as f:
                f.write('\n'.join(captions) + '\n')
            logger.info('save captions done')
        # Advance by the nominal batch size (a final partial batch would
        # leave a gap in numbering, matching the original behavior).
        now_idx += bs
        if not batch_selected:
            break
