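"""FastAPI service exposing an image-to-image diffusion endpoint.

Clients POST a prompt and a base64-encoded input image to /img2img and
receive the generated image back as a base64-encoded string.
"""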
from fastapi import FastAPI, Body
import uvicorn
import json
from PIL import Image
import time
from constants import DESCRIPTION, LOGO
from model import get_pipeline
from utils import replace_background
from diffusers.utils import load_image
import base64
import io
from datetime import datetime
app = FastAPI(title="mutilParam")

# Load the diffusion pipeline once at startup so every request reuses it.
pipeline = get_pipeline()
# Endpoints
# Root endpoint
@app.get("/")
def root():
    return {"API": "mutilParam"}
@app.post("/img2img")
async def predict(prompt=Body(...), imgbase64data=Body(...)):
    MAX_QUEUE_SIZE = 4  # currently unused
    start = time.time()
    print("Request params:", imgbase64data, prompt)

    # Decode the base64-encoded input image and downscale it, preserving the aspect ratio.
    image_data = base64.b64decode(imgbase64data)
    image1 = Image.open(io.BytesIO(image_data))
    w, h = image1.size
    newW = 256
    newH = int(h * newW / w)
    img = image1.resize((newW, newH))
    end1 = time.time()
    print(datetime.now())
    print("Input image size:", img.size)
    print("Preprocessing time:", end1 - start)

    # Run the image-to-image pipeline on the resized image.
    result = pipeline(
        prompt=prompt,
        image=img,
        strength=0.6,
        seed=10,
        width=256,
        height=256,
        guidance_scale=1,
        num_inference_steps=4,
    )
    output_image = result.images[0]
    end2 = time.time()
    print("Output image:", output_image)
    print("Generation time:", end2 - end1)

    # Encode the generated PIL image as PNG bytes, then base64, so the client can decode it.
    buffer = io.BytesIO()
    output_image.save(buffer, format="PNG")
    output_image_base64 = base64.b64encode(buffer.getvalue()).decode()
    print("Encoded output image:", output_image_base64)
    return output_image_base64
@app.post("/predict")
async def predict_text(prompt=Body(...)):
    return f"Hello, {prompt}"
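

# Example client call (a sketch; it assumes the server runs on localhost:8000
# and that the `requests` package is installed):
#
#   import base64, requests
#   with open("input.png", "rb") as f:
#       img_b64 = base64.b64encode(f.read()).decode()
#   resp = requests.post(
#       "http://localhost:8000/img2img",
#       json={"prompt": "a watercolor cat", "imgbase64data": img_b64},
#   )
#   with open("output.png", "wb") as f:
#       f.write(base64.b64decode(resp.json()))

# Minimal entry point for running the service directly; the host and port
# below are assumptions, not values taken from the original deployment.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)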