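"""Minimal SD-Turbo image-generation API built with FastAPI.

Exposes three endpoints backed by shared diffusers pipelines:
  POST /text-to-image/   - generate an image from a text prompt
  POST /image-to-image/  - transform an uploaded image guided by a prompt
  POST /inpainting/      - repaint a masked region of an uploaded image
"""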
from fastapi import FastAPI, Request, Form, File, UploadFile
from fastapi.responses import StreamingResponse
from contextlib import asynccontextmanager
from starlette.middleware.cors import CORSMiddleware

from PIL import Image
from io import BytesIO
from diffusers import (
    AutoPipelineForText2Image,
    AutoPipelineForImage2Image,
    AutoPipelineForInpainting,
)


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Load the SD-Turbo text-to-image pipeline once at startup.
    text2img = AutoPipelineForText2Image.from_pretrained("stabilityai/sd-turbo").to(
        "cpu"
    )

    # Reuse the already-loaded components for image-to-image and inpainting
    # instead of loading the model weights a second time.
    img2img = AutoPipelineForImage2Image.from_pipe(text2img).to("cpu")
    inpaint = AutoPipelineForInpainting.from_pipe(img2img).to("cpu")

    # Everything yielded here becomes available to handlers via request.state.
    yield {"text2img": text2img, "img2img": img2img, "inpaint": inpaint}

    # Drop local references on shutdown so the pipelines can be garbage-collected.
    del text2img
    del img2img
    del inpaint


app = FastAPI(lifespan=lifespan)

# Allow requests from any origin (open CORS).
origins = ["*"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.get("/")
async def root():
    return {"Hello": "World"}


@app.post("/text-to-image/")
async def text_to_image(
    request: Request, prompt: str = Form(...), num_inference_steps: int = Form(1)
):
    image = request.state.text2img(
        prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=0.0
    ).images[0]

    bytes = BytesIO()
    image.save(bytes, "PNG")
    bytes.seek(0)
    return StreamingResponse(bytes, media_type="image/png")


@app.post("/image-to-image/")
async def image_to_image(
    request: Request,
    prompt: str = Form(...),
    init_image: UploadFile = File(...),
    num_inference_steps: int = Form(2),
    strength: float = Form(0.5),
):
    bytes = await init_image.read()
    init_image = Image.open(BytesIO(bytes))
    init_image = init_image.convert("RGB").resize((512, 512))

    image = request.state.img2img(
        prompt,
        image=init_image,
        num_inference_steps=num_inference_steps,
        strength=strength,
        guidance_scale=0.0,
    ).images[0]

    bytes = BytesIO()
    image.save(bytes, "PNG")
    bytes.seek(0)
    return StreamingResponse(bytes, media_type="image/png")


@app.post("/inpainting/")
async def inpainting(
    request: Request,
    prompt: str = Form(...),
    init_image: UploadFile = File(...),
    mask_image: UploadFile = File(...),
    num_inference_steps: int = Form(3),
    strength: float = Form(0.5),
):
    bytes = await init_image.read()
    init_image = Image.open(BytesIO(bytes))
    init_image = init_image.convert("RGB").resize((512, 512))
    bytes = await mask_image.read()
    mask_image = Image.open(BytesIO(bytes))
    mask_image = mask_image.convert("RGB").resize((512, 512))

    image = request.state.inpaint(
        prompt,
        image=init_image,
        mask_image=mask_image,
        num_inference_steps=num_inference_steps,
        strength=strength,
        guidance_scale=0.0,
    ).images[0]

    bytes = BytesIO()
    image.save(bytes, "PNG")
    bytes.seek(0)
    return StreamingResponse(bytes, media_type="image/png")
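

# Example client call (a minimal sketch, not part of the service itself).
# Assumes the app is served locally, e.g. `uvicorn app:app` with this file
# saved as app.py; the host, port, prompt, and file names are placeholders.
#
#     import requests
#
#     resp = requests.post(
#         "http://127.0.0.1:8000/text-to-image/",
#         data={"prompt": "a watercolor fox", "num_inference_steps": 1},
#     )
#     with open("out.png", "wb") as f:
#         f.write(resp.content)
#
# The /image-to-image/ and /inpainting/ endpoints additionally expect the
# uploads as multipart files, e.g. files={"init_image": open("photo.png", "rb")}.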