# main(): load the diffusion and BLIP-2 pipelines, then repeatedly sample images -> auto-caption them
# until the target count is reached; each worker writes its captions to a per-rank part file, and the
# part files are merged into a single text file after all workers finish.

# TODO: also save the original prompt text and the sampled image used for each caption

from argparse import Namespace
from reflow.utils import _PIPELINES, _SCHEDULERS
from copy import deepcopy
import torch
from torch.utils.data import Dataset, DataLoader
from reflow.utils import set_seed, cycle, nothing
from pathlib import Path
import json
from tqdm import tqdm
from transformers import Blip2Processor, Blip2ForConditionalGeneration
import multiprocessing as mp
import os
from typing import List
from PIL import Image

def split_integer(num, n):
    """Split the integer ``num`` into ``n`` near-equal integer parts.

    Args:
        num: The integer to split.
        n: The number of parts.

    Returns:
        A list of ``n`` integers summing to ``num``; the first
        ``num % n`` entries are one larger than the rest.
    """
    base, extra = divmod(num, n)
    # The remainder is spread one unit at a time over the leading parts.
    return [base + 1 if i < extra else base for i in range(n)]


class PromptDS(Dataset):
    """Dataset yielding one text prompt per line of a prompt file."""

    def __init__(self, prompt_path,):
        # Use a context manager so the file handle is closed deterministically
        # (the original relied on the garbage collector to close it).
        with open(prompt_path, 'r', encoding='utf-8') as f:
            self.all_prompt = f.read().splitlines()

    def __getitem__(self, i):
        # Return the raw prompt string; DataLoader batches these into lists.
        return self.all_prompt[i]

    def __len__(self):
        return len(self.all_prompt)

def save_image(images:List[Image.Image], image_dir:Path, start_idx:int):
    """Write each image to ``image_dir`` as ``<n>.png``, numbering from ``start_idx``."""
    for offset, img in enumerate(images):
        destination = image_dir / f'{start_idx+offset}.png'
        img.save(str(destination))

def main(rank, device, nums, save_path: Path, args, start_idx):
    """Worker process: sample images with the diffusion pipeline, caption them
    with BLIP-2, and write this rank's caption part files under ``save_path``.

    Args:
        rank: Worker index; only rank 0 renders the progress bar.
        device: CUDA device string for this worker, e.g. 'cuda:1'.
        nums: Number of caption/image pairs this worker must produce.
        save_path: Output directory (images go to ``save_path / 'images'``).
        args: Configuration namespace from ``prepare_args()``.
        start_idx: First global image index assigned to this worker, so image
            file names stay unique across workers.
    """

    # Per-rank seed: every worker shuffles the same prompt pool differently.
    set_seed(args.base_rnd + rank)

    # Load the diffusers pipeline
    pipeline_cls = _PIPELINES[args.diffusers_pipeline]
    scheduler_cls = _SCHEDULERS[args.diffusers_scheduler]

    pipeline = pipeline_cls.from_pretrained(
        args.diffusers_pipeline_ckpt,
        torch_dtype=args.weight_dtype,
        safety_checker=None,
        feature_extractor=None,
        requires_safety_checker=False,
    )
    if args.ckpt_path:
        pipeline.unet.load_state_dict(torch.load(args.ckpt_path))
    pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)
    # NOTE(review): the VAE checkpoint path is hard-coded here rather than taken from args.
    pipeline.vae.load_state_dict(torch.load(
        "checkpoints/sd-vae-ft-mse/diffusion_pytorch_model.bin"))
    pipeline = pipeline.to(device)

    pipeline.enable_xformers_memory_efficient_attention()

    # Load the BLIP-2 captioning pipeline
    processor = Blip2Processor.from_pretrained(args.blip_pipeline_path)
    model = Blip2ForConditionalGeneration.from_pretrained(
        args.blip_pipeline_path, torch_dtype=args.weight_dtype)
    model = model.to(device)

    ds = PromptDS(args.prompt_path)
    dl = DataLoader(ds, batch_size=args.bs, shuffle=True, )
    # cycle() makes the loader endless; we stop once `nums` pairs are collected.
    dl_iter = cycle(dl)
    pbar = tqdm(range(nums), total=nums, disable=(rank != 0))  # only the main process shows progress

    original_captions = []
    auto_captions = []
    sample_cnt = 0
    for prompts in dl_iter:
        images = pipeline(
            prompt=prompts,
            # latents=noise,
            num_inference_steps=args.inference_steps,
            guidance_scale=args.guidance_scale,
            disable_pbar=True,
        ).images
        # Caption every sampled image, conditioning BLIP-2 on the "a photo of" prefix.
        inputs = processor(images, ["a photo of"]*len(images),return_tensors="pt").to(device, dtype=args.weight_dtype)
        out = model.generate(**inputs, max_new_tokens=60)
        auto_caption = processor.batch_decode(
            out, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        auto_caption = [c.strip() for c in auto_caption]

        # Trim the last batch so exactly `nums` caption/prompt/image triples are kept.
        if sample_cnt + len(auto_caption) > nums:
            auto_caption = auto_caption[:nums-sample_cnt]
            prompts = prompts[:nums-sample_cnt]
            images = images[:nums-sample_cnt]
        auto_captions.extend(auto_caption)
        original_captions.extend(prompts)
        if args.save_images:
            save_image(images, save_path / 'images', start_idx)
            start_idx += len(images)

        sample_cnt += len(auto_caption)
        pbar.update(args.bs)  # NOTE(review): advances by a full batch even when the final batch was trimmed
        if sample_cnt >= nums:
            break
    pbar.close()
    print(f"write auto captions to file <<{str(save_path / f'auto_caption_part{rank}.txt')}>> (by process {rank})")
    with (save_path / f'auto_caption_part{rank}.txt').open('w', encoding='utf-8') as f:
        f.write('\n'.join(auto_captions))
        
    print(f"write original captions to file <<{str(save_path / f'original_caption_part{rank}.txt')}>> (by process {rank})")
    with (save_path / f'original_caption_part{rank}.txt').open('w', encoding='utf-8') as f:
        f.write('\n'.join(original_captions))




def merge_text_files(folder_path, output_file_name="auto_caption"):
    """Concatenate per-rank part files into ``<output_file_name>.txt`` and delete the parts.

    Parts are merged in ascending rank order. This matters: ``os.listdir`` order
    is arbitrary (and lexicographic order puts ``part10`` before ``part2``), which
    would desynchronize merged caption line numbers from the globally assigned
    image indices.

    Args:
        folder_path: Directory containing the ``<name>_part<rank>.txt`` files.
        output_file_name: Base name of the part files and of the merged output.
    """
    prefix = f"{output_file_name}_part"
    part_files = [
        name for name in os.listdir(folder_path)
        if name.startswith(prefix) and name.endswith(".txt")
    ]
    # Numeric sort by rank so the merged order matches the per-rank start_idx layout.
    part_files.sort(key=lambda name: int(name[len(prefix):-len(".txt")]))

    output_file_path = os.path.join(folder_path, f"{output_file_name}.txt")
    with open(output_file_path, "w", encoding='utf-8') as output_file:
        for file_name in part_files:
            file_path = os.path.join(folder_path, file_name)
            with open(file_path, "r", encoding='utf-8') as input_file:
                output_file.write(input_file.read())
                output_file.write("\n")
            os.remove(file_path)

def save_args(args, save_path):
    """Serialize the run configuration to ``save_path / 'args.json'``.

    Works on a deep copy so the caller's ``args`` is untouched; ``weight_dtype``
    is stringified first because torch dtypes are not JSON-serializable.

    Args:
        args: Namespace of run settings (must have a ``weight_dtype`` attribute).
        save_path: Directory (``pathlib.Path``) to write ``args.json`` into.
    """
    args_to_save = deepcopy(args)
    args_to_save.weight_dtype = str(args_to_save.weight_dtype)
    # Context manager guarantees the JSON file is flushed and closed
    # (the original passed an unmanaged open() handle to json.dump).
    with (save_path / "args.json").open('w', encoding='utf-8') as f:
        json.dump(vars(args_to_save), f)
    
    

def convert_captions_to_clipsim(save_path:Path, file_name='auto_caption'):
    """Convert ``<file_name>.txt`` (one caption per line) into ``<file_name>.json``
    mapping the zero-based line index (as a string) to the caption — the format
    the CLIPSim computation consumes.

    Args:
        save_path: Directory containing the caption text file.
        file_name: Base name (without extension) of the input/output files.
    """
    # Context managers close both handles deterministically (the original
    # leaked the read handle and passed an unmanaged handle to json.dump).
    with (save_path / f'{file_name}.txt').open('r', encoding='utf-8') as f:
        captions = f.read().splitlines()
    content_to_save = {str(i): caption for i, caption in enumerate(captions)}
    with (save_path / f'{file_name}.json').open('w', encoding='utf-8') as f:
        json.dump(content_to_save, f)
    
def convert_images_to_clipsim(save_path:Path):
    """Rearrange flat ``images/<idx>.png`` files into ``images/<idx>/0.png``,
    the one-folder-per-sample layout the CLIPSim computation expects.

    Args:
        save_path: Run output directory containing the ``images`` folder.
    """
    image_dir = save_path / 'images'
    # Snapshot the listing once, then move each file into its own folder.
    for image_name in tqdm(os.listdir(str(image_dir)), desc="Images"):
        str_idx = image_name.split('.')[0]
        target_dir = image_dir / str_idx
        target_dir.mkdir(parents=True, exist_ok=True)
        # os.replace is portable and safe for paths with spaces, unlike
        # shelling out to `mv` via os.system as the original did.
        os.replace(str(image_dir / image_name), str(target_dir / '0.png'))


def prepare_args():
    """Assemble the sampling/captioning run configuration as a Namespace."""
    config = dict(
        # Diffusion side
        diffusers_pipeline='stable_diffusion',
        diffusers_scheduler='dpm_solver_multi',
        diffusers_pipeline_ckpt='checkpoints/SD-1-4',
        ckpt_path=None,
        # Captioning side
        blip_pipeline_path="checkpoints/blip2-opt-2.7b-coco",
        # Sampling hyper-parameters
        weight_dtype=torch.float16,
        inference_steps=25,
        guidance_scale=7.5,
        # Base random seed; each process uses rnd = base_rnd + rank, so every
        # worker sees the same prompt pool but draws different prompts.
        base_rnd=58305685,
        prompt_path="data/reflow/laion6+_random1M.txt",  # large enough pool
        total_nums=50,
        save_path="samples/metric_sample/clipsim_test",
        gpu_ids=[0, 1],
        bs=3,
        save_images=True,
        to_clipsim_format=True,
    )
    args = Namespace(**config)

    # A distilled UNet checkpoint must be paired with the dummy Euler scheduler.
    if not nothing(args.ckpt_path):
        assert args.diffusers_scheduler == "euler_dummy"

    return args

if __name__ == "__main__":
    # CUDA cannot be re-initialized in a forked subprocess, so workers must be spawned.
    mp.set_start_method('spawn')

    args = prepare_args()
    save_path = Path(args.save_path)
    save_path.mkdir(parents=True, exist_ok=True)

    # Persist the full configuration next to the outputs.
    save_args(args, save_path)

    if args.save_images:
        (save_path / 'images').mkdir(parents=True, exist_ok=True)

    # Split the total sample count across GPUs as evenly as possible.
    per_gpu_nums = split_integer(args.total_nums, len(args.gpu_ids))

    workers = []
    next_image_idx = 0
    for rank, (gpu_id, share) in enumerate(zip(args.gpu_ids, per_gpu_nums)):
        proc = mp.Process(
            target=main,
            args=(rank, f'cuda:{gpu_id}', share, save_path, args, next_image_idx),
        )
        proc.start()
        workers.append(proc)
        next_image_idx += share  # each worker numbers its images from a disjoint range

    for proc in workers:
        proc.join()

    # Fuse the per-rank part files into single caption files.
    merge_text_files(str(save_path), output_file_name="auto_caption")
    merge_text_files(str(save_path), output_file_name="original_caption")

    if args.to_clipsim_format:
        print('convert captions (and images) to CLIPSim computation format')
        convert_captions_to_clipsim(save_path, "auto_caption")
        convert_captions_to_clipsim(save_path, "original_caption")
        if args.save_images:
            convert_images_to_clipsim(save_path)