"""
读取json文件（/root/xjh/hsw_poem_data/aug_poem/aug_poems_2.json）中每一个flag为true的数据项的prompt和negative_prompt，传入SD3.5进行生图，并将图片保存在"/root/xjh/data02_poem/poem_imgs"目录下，图片文件命名为数据项的id名，

并将图片路径保存在json文件的新增的“img_path”字段中，另存为新的json文件（/root/xjh/hsw_poem_data/aug_poem/aug_poems_img.json），其余字段和原json文件保持一致，

忽略并跳过所有 flag=false 的数据项。

采用多进程并发请求SD3.5生成图像:
    1.因为SD 3.5 模型运行是计算密集型的 GPU 操作，受制于 Python GIL 和 CUDA 的上下文机制，只有多进程才能实现真正的并行，且保障稳定性。
    2.每个进程拥有独立的 Python 解释器、内存空间和 CUDA 上下文，从系统角度是完全隔离的

"""
import json
import multiprocessing
import os
from multiprocessing import Process, Queue, current_process
from queue import Empty

import torch
from diffusers import DiffusionPipeline

# === Configuration ===
NUM_GPUS = 4
INPUT_JSON = "/root/xjh/hsw_poem_data/aug_poem/aug_poems_2.json"
# NOTE(review): the module docstring says the output JSON should be saved under
# /root/xjh/hsw_poem_data/aug_poem/ — this constant points elsewhere; confirm.
OUTPUT_JSON = "/root/xjh/data02_poem/aug_poems_img.json"
IMGS_DIR = "/root/xjh/data02_poem/poem_imgs"
os.makedirs(IMGS_DIR, exist_ok=True)  # also creates OUTPUT_JSON's parent directory

# Route Hugging Face downloads through the mirror and silence the tokenizers
# fork/parallelism warning when running multiple worker processes.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# === 子进程任务 ===
def run_worker(gpu_id, task_queue, result_queue):
    """Worker process: pin to one GPU, load SD3.5, and render every queued task.

    Consumes ``(poem_id, data)`` tuples from ``task_queue`` (pre-filled by the
    parent before this process starts) and reports ``(poem_id, path_or_None)``
    on ``result_queue`` — exactly one result per task, success or failure.

    Args:
        gpu_id: physical GPU index this worker is pinned to.
        task_queue: multiprocessing.Queue of (poem_id, data) work items.
        result_queue: multiprocessing.Queue for (poem_id, img_path|None) results.
    """
    print(f"[GPU {gpu_id}] 子进程 {current_process().name} 启动，初始化 pipeline ...")
    # Restrict this process to a single GPU *before* any CUDA/HIP context is
    # created. Once HIP_VISIBLE_DEVICES is set, the only visible device is
    # ordinal 0 — addressing `cuda:{gpu_id}` here would fail for gpu_id >= 1,
    # so the logical device inside this process is always cuda:0.
    os.environ["HIP_VISIBLE_DEVICES"] = str(gpu_id)
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    pipeline = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-3.5-large",
        torch_dtype=torch.bfloat16,
        local_files_only=True
    )
    pipeline.tokenizer.model_max_length = 128
    pipeline = pipeline.to(device)
    # Per-worker seed: reproducible runs, but each GPU draws a distinct stream.
    generator = torch.Generator(device=device).manual_seed(1234567890 + gpu_id)

    while True:
        try:
            # Tasks are fully enqueued before workers start, so an Empty
            # timeout reliably means "no work left" (avoids the racy
            # `while not queue.empty()` pattern).
            poem_id, data = task_queue.get(timeout=3)
        except Empty:
            break

        output_path = os.path.join(IMGS_DIR, f"{poem_id}.png")
        if os.path.exists(output_path):
            # Resume support: keep existing renders, but still report the path
            # so the parent records it in the output JSON.
            print(f"✅ [GPU {gpu_id}] 已存在 {poem_id}.png，跳过")
            result_queue.put((poem_id, output_path))
            continue

        try:
            print(f"🎨 [GPU {gpu_id}] 生成图像：{poem_id}")
            image = pipeline(
                prompt=data["prompt"],
                negative_prompt=data["negative_prompt"],
                num_inference_steps=28,
                guidance_scale=3.5,
                generator=generator
            ).images[0]
            image.save(output_path)
            print(f"✅ [GPU {gpu_id}] 完成：{output_path}")
            result_queue.put((poem_id, output_path))
        except Exception as e:
            # One bad prompt must not kill the worker; report failure and go on.
            print(f"❌ [GPU {gpu_id}] 失败 {poem_id}: {e}")
            result_queue.put((poem_id, None))


# === 主进程逻辑 ===
def main():
    """Driver: load tasks, fan out one worker per GPU, collect results, save JSON.

    Reads INPUT_JSON, renders every item whose ``flag`` is exactly True,
    records each image path under a new ``img_path`` key, and writes the
    merged document (all original fields preserved) to OUTPUT_JSON.
    """
    # Load the original JSON document (dict keyed by poem id).
    with open(INPUT_JSON, "r", encoding="utf-8") as f:
        data = json.load(f)

    # Only items explicitly flagged True are rendered; everything else is skipped.
    all_tasks = [(pid, pdata) for pid, pdata in data.items() if pdata.get("flag") is True]
    print(f"🚀 总任务数：{len(all_tasks)}")

    # One private task queue per GPU plus a shared result queue.
    task_queues = [Queue() for _ in range(NUM_GPUS)]
    result_queue = Queue()

    # Round-robin assignment: evenly balanced and deterministic, unlike
    # hash(str) which is unbalanced and varies across runs (PYTHONHASHSEED).
    for idx, (poem_id, poem_data) in enumerate(all_tasks):
        task_queues[idx % NUM_GPUS].put((poem_id, poem_data))

    # Start one worker per GPU.
    workers = []
    for gpu_id in range(NUM_GPUS):
        p = Process(target=run_worker, args=(gpu_id, task_queues[gpu_id], result_queue),
                    name=f"Worker-GPU-{gpu_id}")
        p.start()
        workers.append(p)

    # Drain the result queue BEFORE joining the workers: a child blocked on a
    # full result_queue pipe never exits, so join()-then-drain can deadlock.
    # Each task yields exactly one (poem_id, path|None) result.
    for _ in range(len(all_tasks)):
        try:
            poem_id, img_path = result_queue.get(timeout=600)
        except Empty:
            # A worker likely died before reporting; stop waiting and save
            # whatever was collected so far.
            print("⚠️ 结果队列超时，可能有子进程异常退出")
            break
        if img_path:
            data[poem_id]["img_path"] = img_path

    # All results are in (or collection gave up); now the joins cannot block.
    for p in workers:
        p.join()

    # Persist the augmented document (failed items simply lack img_path).
    with open(OUTPUT_JSON, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=2)

    print(f"🎉 所有图像已生成，结果保存至：{OUTPUT_JSON}")


if __name__ == "__main__":
    # Use "spawn" so each child gets a fresh interpreter and its own CUDA
    # context — fork-inherited CUDA state is unsafe in child processes.
    multiprocessing.set_start_method("spawn")
    main()
