"""
采用多进程并发请求SD3.5生成图像:
    1.因为SD 3.5 模型运行是计算密集型的 GPU 操作，受制于 Python GIL 和 CUDA 的上下文机制，只有多进程才能实现真正的并行，且保障稳定性。
    2.每个进程拥有独立的 Python 解释器、内存空间和 CUDA 上下文，从系统角度是完全隔离的

"""
import contextlib
import json
import os
import queue
import random
import sys
import time
from multiprocessing import Process, Queue, current_process

import torch
from diffusers import DiffusionPipeline
from tqdm import tqdm

# === Configuration ===
NUM_GPUS = 4  # number of worker processes, one per GPU
IP = 45  # machine tag embedded in the output filename
INPUT_JSON = "/data02/xjh/poem/split_0.json" # 45-0
OUTPUT_JSON = f"/data02/xjh/poem/aug_poems_img_{IP}.json"
IMGS_DIR = "/data02/xjh/poem/poem_imgs"
NUM_IMGS_PER_POEM = 3  # number of images to generate per poem
os.makedirs(IMGS_DIR, exist_ok=True)

# Use the HF mirror for any (non-local) HuggingFace traffic; silence the
# tokenizers fork-parallelism warning in the worker processes.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
os.environ["TOKENIZERS_PARALLELISM"] = "false"

@contextlib.contextmanager
def suppress_stdout():
    """Silence both stdout and stderr for the duration of the context.

    Streams are redirected to os.devnull and restored on exit, even if the
    body raises.
    """
    with open(os.devnull, "w") as devnull, \
         contextlib.redirect_stdout(devnull), \
         contextlib.redirect_stderr(devnull):
        yield


# === 子进程任务 ===
def run_worker(gpu_id, task_queue, result_queue):
    """Worker process bound to one GPU: consume poem tasks, generate images.

    Args:
        gpu_id: index of the physical GPU this worker should use.
        task_queue: multiprocessing.Queue of (poem_id, data) pairs; data must
            carry "prompt" and "negative_prompt" keys.
        result_queue: multiprocessing.Queue receiving (filename, metadata, err)
            triples; err is None on success or skip, else the exception text.
    """
    print(f"[GPU {gpu_id}] 子进程 {current_process().name} 启动，初始化 pipeline ...")

    # Restrict this process to a single physical GPU (ROCm masking). After
    # masking, the one remaining visible device is index 0 — BUG FIX: the
    # original called torch.cuda.set_device(gpu_id), which raises for
    # gpu_id >= 1 because only one device stays visible after masking.
    os.environ["HIP_VISIBLE_DEVICES"] = str(gpu_id)
    torch.cuda.set_device(0)
    device = torch.device("cuda:0")

    pipeline = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-3.5-large",
        torch_dtype=torch.bfloat16,
        local_files_only=True
    )
    pipeline.tokenizer.model_max_length = 128
    pipeline = pipeline.to(device)

    while True:
        try:
            # Blocking get with a timeout replaces the racy empty()/bare-except
            # pattern; queue.Empty is the only exception expected here.
            poem_id, data = task_queue.get(timeout=3)
        except queue.Empty:
            break

        # Metadata stored in the output JSON (seed deliberately omitted).
        metadata = {
            "prompt": data["prompt"],
            "negative_prompt": data["negative_prompt"],
            "cfg": 3.5,
            "steps": 28,
            "sampler": "fmeds",
            "msrc": "stablediffusion3.5large",
            "psrc": "diffusiondb",
            "platform": "k100",
            "qt": "bf16",
            "width": 1024,
            "height": 1024,
        }

        # BUG FIX: was hard-coded range(5). main() only tracks
        # NUM_IMGS_PER_POEM (=3) filenames per poem, so images 3 and 4 were
        # generated but never checked on resume nor counted in the total.
        for i in range(NUM_IMGS_PER_POEM):
            out_filename = f"{poem_id}_{i}.webp"
            output_path = os.path.join(IMGS_DIR, out_filename)
            if os.path.exists(output_path):
                # Already on disk: still report it so main() records metadata.
                result_queue.put((out_filename, metadata, None))
                continue

            try:
                seed = random.randint(0, 9999999999)
                generator = torch.Generator(device=device).manual_seed(seed)

                with suppress_stdout():  # silence SD3.5 pipeline progress output
                    image = pipeline(
                        prompt=data["prompt"],
                        negative_prompt=data["negative_prompt"],
                        num_inference_steps=28,
                        guidance_scale=3.5,
                        generator=generator
                    ).images[0]
                image.save(output_path, format="WEBP")
                result_queue.put((out_filename, metadata, None))
            except Exception as e:
                # Report the failure instead of crashing the whole worker.
                result_queue.put((out_filename, metadata, str(e)))

# === 主进程逻辑 ===
def main():
    """Load poem tasks, shard them across per-GPU workers, persist results.

    Results are written incrementally to OUTPUT_JSON so the job is resumable:
    poems whose images are all present in the existing results are skipped.
    """
    # Resume support: reload any previously saved metadata.
    if os.path.exists(OUTPUT_JSON):
        with open(OUTPUT_JSON, "r", encoding="utf-8") as f:
            print(f"📝 读取已存在的结果文件：{OUTPUT_JSON}")
            existing_results = json.load(f)
    else:
        print(f"📝 未找到结果文件 {OUTPUT_JSON}，将创建新的结果文件")
        existing_results = {}

    with open(INPUT_JSON, "r", encoding="utf-8") as f:
        data = json.load(f)

    # A poem is (re)queued whole as soon as any of its images is missing;
    # the worker skips individual files that already exist on disk.
    all_tasks = []
    for pid, pdata in data.items():
        if pdata.get("flag") is True:  # only process validated poems
            for i in range(NUM_IMGS_PER_POEM):
                out_filename = f"{pid}_{i}.webp"
                if out_filename not in existing_results:
                    all_tasks.append((pid, pdata))
                    break

    # BUG FIX: was hard-coded "* 5" while NUM_IMGS_PER_POEM is 3, which
    # inflated the progress-bar total beyond the number of results produced.
    total_tasks = len(all_tasks) * NUM_IMGS_PER_POEM
    print(f"🚀 剩余图片数：{total_tasks}")

    task_queues = [Queue() for _ in range(NUM_GPUS)]
    result_queue = Queue()

    # Deterministic (within this run) sharding of poems onto GPUs.
    for poem_id, poem_data in all_tasks:
        assigned_gpu = hash(poem_id) % NUM_GPUS
        task_queues[assigned_gpu].put((poem_id, poem_data))

    # One worker process per GPU.
    workers = []
    for gpu_id in range(NUM_GPUS):
        p = Process(target=run_worker, args=(gpu_id, task_queues[gpu_id], result_queue), name=f"Worker-GPU-{gpu_id}")
        p.start()
        workers.append(p)

    save_every = 100  # incremental-save interval, in completed images
    processed = 0  # images successfully recorded so far
    print(f"🚀 🚀 🚀 🚀 🚀 🚀 🚀 🚀 🚀 ")

    pbar = tqdm(total=total_tasks, desc="进度", ncols=100)

    while True:
        all_done = all(not p.is_alive() for p in workers)
        got_result = False

        # Drain everything currently sitting in the result queue.
        while True:
            try:
                fname, metadata, err = result_queue.get_nowait()
            except queue.Empty:
                break  # no result available right now
            got_result = True
            if metadata and not err:
                existing_results[fname] = metadata
                processed += 1
                pbar.update(1)

            # BUG FIX: guard against processed == 0, which previously
            # re-saved on every error/skip result before the first success.
            if processed and processed % save_every == 0:
                with open(OUTPUT_JSON, "w", encoding="utf-8") as f:
                    json.dump(existing_results, f, ensure_ascii=False, indent=2)
                print(f"📝 已增量保存至 {OUTPUT_JSON}，当前已完成 {processed} 条")

        if all_done and not got_result:
            break
        # BUG FIX: the original loop busy-spun at 100% CPU between drains.
        time.sleep(0.5)

    pbar.close()

    # Reap the (already finished) worker processes.
    for p in workers:
        p.join()

    # Final flush of all collected metadata.
    with open(OUTPUT_JSON, "w", encoding="utf-8") as f:
        json.dump(existing_results, f, ensure_ascii=False, indent=2)

    print(f"🎉 图像生成完成，结果保存至：{OUTPUT_JSON}")


if __name__ == "__main__":
    import multiprocessing
    # "spawn" gives every worker a fresh interpreter and a clean CUDA context;
    # the Linux default "fork" is unsafe with CUDA state in child processes.
    multiprocessing.set_start_method("spawn", force=True)
    main()
