"""Text-to-image demo using a remote stable-diffusion endpoint.

Source: stable-diffusion/text_to_image.py, by Dmitry Trifonov
(commit b6fedf9, "use just endpoint in text to image demo").
"""
import base64
from io import BytesIO
import requests
from PIL import Image
# Base URL of the remote text-to-image inference server.
# NOTE(review): hard-coded IP — presumably a temporary deployment; consider
# reading this from an environment variable or config instead.
ENDPOINT_ADDRESS = "http://35.233.231.20:5000"
def text_to_image(prompt, width=512, height=512, num_inference_steps=25, timeout=60):
    """Generate an image from a text prompt via the remote diffusion endpoint.

    Args:
        prompt: Text description of the desired image.
        width: Output image width in pixels (default 512).
        height: Output image height in pixels (default 512).
        num_inference_steps: Number of diffusion denoising steps (default 25).
        timeout: Seconds to wait for the HTTP request before giving up
            (default 60) — without this, a dead endpoint hangs forever.

    Returns:
        A ``PIL.Image.Image`` decoded from the endpoint's base64 payload.

    Raises:
        requests.HTTPError: If the endpoint returns a non-2xx status.
        requests.Timeout: If the endpoint does not respond within ``timeout``.
        KeyError: If the response JSON lacks an ``image_base64`` field.
    """
    inputs = {
        "modelInputs": {
            "prompt": prompt,
            "num_inference_steps": num_inference_steps,
            "width": width,
            "height": height,
        },
        "callInputs": {
            "MODEL_ID": "lykon/dreamshaper-8",
            "PIPELINE": "AutoPipelineForText2Image",
            "SCHEDULER": "DEISMultistepScheduler",
            "PRECISION": "fp16",
            "REVISION": "fp16",
        },
    }
    response = requests.post(ENDPOINT_ADDRESS, json=inputs, timeout=timeout)
    # Surface HTTP errors explicitly instead of failing later with a
    # confusing JSON decode / KeyError on an error body.
    response.raise_for_status()
    payload = response.json()
    image_data = BytesIO(base64.b64decode(payload["image_base64"]))
    return Image.open(image_data)
if __name__ == "__main__":
    # Demo: render a fixed prompt and write the result to disk.
    result = text_to_image("Robot dinosaur")
    result.save("result.png")