# import torch, uvicorn, base64, io
# from fastapi import FastAPI
# from langchain_core.runnables import RunnableLambda
# from pydantic import BaseModel
# from diffusers import StableDiffusionPipeline

# pipe = StableDiffusionPipeline.from_pretrained(
#     "./stable-diffusion-v1-5/stable-diffusion-v1-5 ",
#     torch_dtype=torch.float16
# ).to("cuda")

# def txt2img(prompt: str) -> str:
#     image = pipe(prompt, num_inference_steps=20).images[0]
#     buf = io.BytesIO()
#     image.save(buf, format="PNG")
#     return base64.b64encode(buf.getvalue()).decode()

# chain = RunnableLambda(txt2img)

# app = FastAPI(title="Diffusers T2I")
# class Prompt(BaseModel):
#     prompt: str

# @app.post("/text2image")
# async def generate(p: Prompt):
#     return {"image_b64": chain.invoke(p.prompt)}

# if __name__ == "__main__":
#     uvicorn.run("diffusers_service:app", host="0.0.0.0", port=25063)

# import torch, base64, io, uvicorn
# from fastapi import FastAPI
# from langchain_core.runnables import RunnableLambda
# from pydantic import BaseModel
# from diffusers import FluxPipeline

# # 1. Load the weights (first run downloads ~12 GB automatically)
# pipe = FluxPipeline.from_pretrained(
#     "./FLUX.1-Krea-dev",
#     torch_dtype=torch.bfloat16
# ).to("cuda")

# # Optional: enable CPU offload when VRAM is insufficient
# pipe.enable_model_cpu_offload()

# def krea_generate(prompt: str) -> str:
#     image = pipe(
#         prompt,
#         height=1024,
#         width=1024,
#         guidance_scale=4.5,
#         num_inference_steps=28
#     ).images[0]
#     buf = io.BytesIO()
#     image.save(buf, format="PNG")
#     return base64.b64encode(buf.getvalue()).decode()

# chain = RunnableLambda(krea_generate)

# # 2. Expose the API via FastAPI
# app = FastAPI(title="Krea-Dev Local")
# class Prompt(BaseModel):
#     prompt: str

# @app.post("/generate")
# async def generate(p: Prompt):
#     return {"image_b64": chain.invoke(p.prompt)}

# if __name__ == "__main__":
#     uvicorn.run("t2i:app", host="0.0.0.0", port=25063, reload=True)

import torch, base64, io
import uvicorn
from fastapi import FastAPI
from langchain_core.runnables import RunnableLambda
from pydantic import BaseModel
from diffusers import StableDiffusion3Pipeline

# 1. Load SD3.5-large (first run downloads ~13 GB automatically)
pipe = StableDiffusion3Pipeline.from_pretrained(
    "./stable-diffusion-3.5-large",
    torch_dtype=torch.bfloat16
)

# Optional: sequential CPU offload when VRAM is tight (saves more than
# enable_model_cpu_offload).
# FIX: do NOT call .to("cuda") before enabling sequential offload — the
# offload hooks manage device placement themselves; pre-moving the whole
# pipeline to the GPU defeats the memory savings, and recent diffusers
# versions reject the combination outright.
pipe.enable_sequential_cpu_offload()

def sd35_generate(prompt: str) -> str:
    """Run the SD3.5 pipeline on *prompt* and return the image as base64-encoded PNG.

    Uses a fixed 1024x1024 output, guidance_scale=5.0 and 28 inference steps.
    """
    result = pipe(
        prompt,
        height=1024,
        width=1024,
        guidance_scale=5.0,
        num_inference_steps=28,
        max_sequence_length=512,  # allow prompts up to 512 tokens
    )
    image = result.images[0]
    out = io.BytesIO()
    image.save(out, format="PNG")
    encoded = base64.b64encode(out.getvalue())
    return encoded.decode()

# Wrap the generator in a LangChain runnable so it can be invoked/composed
# through the standard Runnable interface.
chain = RunnableLambda(sd35_generate)

# 2. Expose the API via FastAPI
app = FastAPI(title="SD3.5 Local")
class Prompt(BaseModel):
    # Request body for POST /generate: the text prompt to render.
    prompt: str

@app.post("/generate")
async def generate(p: Prompt):
    """Generate an image for the posted prompt and return it as base64 PNG."""
    image_b64 = chain.invoke(p.prompt)
    return {"image_b64": image_b64}

if __name__ == "__main__":
    # NOTE(review): the import string "t2i:app" assumes this file is saved as
    # t2i.py — an earlier iteration above used "diffusers_service:app"; confirm
    # the actual module name before deploying.
    # NOTE(review): port is 25603 here but 25063 in both commented-out versions
    # above — possibly a digit transposition; verify the intended port.
    # reload=True re-imports this module (and reloads the ~13 GB model) on
    # every file change — convenient in development, expensive for this service.
    uvicorn.run("t2i:app", host="0.0.0.0", port=25603, reload=True)