
from my_py_toolkit.mllms.tokenizers.image_tokenizers import ImageTokenizerGeneral
from my_py_toolkit.file.file_toolkit import *
from my_py_toolkit.decorator.decorator import fn_timer
from tqdm import tqdm
import sys
import torch
from torch.utils.data import Dataset
from torch.utils import data
from tqdm import tqdm

"""
Convert images to token strings (img2token).

Usage examples:
 CUDA_VISIBLE_DEVICES=1 nohup python img2token.py ./journeydb.txt.handled.handled 500 .img_str 1000000 1500000 > nohup.out.journeydb &
 CUDA_VISIBLE_DEVICES=1 nohup python img2token.py ./journeydb.txt.handled.handled 500 .img_str 1500000 2000000 > nohup.out.journeydb &
 CUDA_VISIBLE_DEVICES=1 nohup python img2token.py ./journeydb.txt.handled.handled 500 .img_str 2000000 2500000 > nohup.out.journeydb &
 CUDA_VISIBLE_DEVICES=1 nohup python img2token.py ./journeydb.txt.handled.handled 500 .img_str 2500000 3000000 > nohup.out.journeydb &
 CUDA_VISIBLE_DEVICES=1 nohup python img2token.py ./journeydb.txt.handled.handled 500 .img_str 3000000 3500000 > nohup.out.journeydb &
 CUDA_VISIBLE_DEVICES=2 nohup python img2token.py ./journeydb.txt.handled.handled 500 .img_str 3500000 4000000 > nohup.out.journeydb &
 CUDA_VISIBLE_DEVICES=2 nohup python img2token.py ./journeydb.txt.handled.handled 500 .img_str 4000000 4500000 > nohup.out.journeydb &
 CUDA_VISIBLE_DEVICES=2 nohup python img2token.py ./journeydb.txt.handled.handled 500 .img_str 4500000 5000000 > nohup.out.journeydb &
"""


def readfile(file_path):
  """Read a UTF-8 text file and return its lines.

  Args:
    file_path: Path to a UTF-8 encoded text file.

  Returns:
    List of lines with trailing newlines stripped.
  """
  result = []
  p_bar = tqdm()
  p_bar.set_description('read lines:')
  with open(file_path, "r", encoding="utf-8") as f:
    # Iterate the file directly. The original readline() loop crashed on an
    # uninitialized `line_cts` counter, left the first line unstripped, and
    # stopped early at the first blank line (a stripped '' is falsy).
    for line in f:
      result.append(line.rstrip('\n'))
      p_bar.update(1)
  p_bar.close()
  return result
  

class ImageDataset(Dataset):
    """Dataset yielding image file paths, from a list file or a directory.

    The dataset returns plain path strings; the actual image decoding is
    done downstream by the tokenizer.
    """

    def __init__(self, data_dir=None, suffix=None, start_ids=0, end_ids=-1):
        """Collect image paths.

        Args:
            data_dir: Either a text file listing one image path per line,
                or a directory to scan for image files.
            suffix: If given, images for which `path + suffix` already
                exists on disk are skipped (allows resuming a partial run).
            start_ids: Index of the first path to keep (inclusive).
            end_ids: Index one past the last path to keep; -1 means
                "until the end of the list".
        """
        super().__init__()
        self.files = []
        if os.path.isfile(data_dir):
            # A plain-text file listing one image path per line.
            self.files = readfile(data_dir)
        elif os.path.isdir(data_dir):
            self.files = get_file_paths(data_dir, ['jpg', 'jpeg', 'png', 'bmp'])

        # Bug fix: the old slice `files[start_ids:-1]` silently dropped the
        # final file whenever end_ids kept its default of -1. Treat -1 as
        # "slice to the end".
        stop = None if end_ids == -1 else end_ids
        self.files = self.files[start_ids:stop]

        self.suffix = suffix
        if suffix:
            # Skip images whose output file already exists (resumability).
            self.files = [file for file in self.files
                          if not os.path.exists(file + self.suffix)]

    def __getitem__(self, idx):
        """Return the image path at position `idx`."""
        return self.files[idx]

    def __len__(self):
        """Return the number of image paths."""
        return len(self.files)

def get_dataloader(data_dir, bs=2, suffix=None, start_ids=0, end_ids=-1):
    """Build a DataLoader of image paths found under ``data_dir``.

    Args:
        data_dir: List file or directory of images (see ImageDataset).
        bs: Batch size.
        suffix: Skip images whose `path + suffix` output already exists.
        start_ids: First index of the path list to keep.
        end_ids: End index of the path list slice.

    Returns:
        A torch DataLoader yielding batches of path strings.
    """
    return data.DataLoader(
        ImageDataset(data_dir, suffix, start_ids, end_ids), bs)

@fn_timer()
def write_img_strs(paths, img_strs, suffix='.img_str'):
    """Write each image's token string next to the source image.

    Args:
        paths: Image path, or list/tuple of image paths.
        img_strs: Token string, or list/tuple of token strings,
            parallel to `paths`.
        suffix: Extension appended to each image path to form the output
            file name. Defaults to '.img_str' (the previously hard-coded
            value), so existing callers are unaffected; passing it
            explicitly keeps the writer consistent with the suffix used
            to skip already-processed images in ImageDataset.
    """
    # Accept a single path/string as well as a batch.
    if not isinstance(paths, (list, tuple)):
        paths = [paths]
    if not isinstance(img_strs, (list, tuple)):
        img_strs = [img_strs]

    for path, img_str in zip(paths, img_strs):
        with open(path + suffix, 'w', encoding='utf-8') as w:
            w.write(img_str)

def main():
    """CLI entry point: tokenize every image and write a *.img_str file.

    argv: <data_dir> [batch_size] [suffix] [start_idx] [end_idx]
    """
    img2token_model_path = '/home/centos/.cache/huggingface/hub/models--AILab-CVC--seed-tokenizer-2/snapshots/c6d957a9b280d9ed3879b1eb5d99036cf8390012/seed_quantizer.pt'
    img2token_model_cls_or_name = 'seed'
    token2img_model_path = '/home/centos/.cache/huggingface/hub/models--stabilityai--stable-diffusion-2-1-unclip/snapshots/e99f66a92bdcd1b0fb0d4b6a9b81b3b37d8bea44'
    token2img_model_cls_or_name = 'stable_unclip'
    tokenizer = ImageTokenizerGeneral(img2token_model_path, img2token_model_cls_or_name, token2img_model_path, token2img_model_cls_or_name)

    # Positional CLI arguments with defaults (see module docstring examples).
    argv = sys.argv
    data_dir = argv[1]
    bs = int(argv[2]) if len(argv) > 2 else 100
    suffix = argv[3] if len(argv) > 3 else None
    start_ids = int(argv[4]) if len(argv) > 4 else 0
    end_ids = int(argv[5]) if len(argv) > 5 else -1

    loader = get_dataloader(data_dir, bs, suffix, start_ids, end_ids)
    for batch_paths in tqdm(loader):
        batch_strs = tokenizer.encode_img2text(batch_paths)
        write_img_strs(batch_paths, batch_strs)


# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()


