import os
import io
# import IPython.display
import base64

import requests
import json
import gradio as gr
from PIL import Image
from dotenv import load_dotenv, find_dotenv

_ = load_dotenv(find_dotenv())  # read local .env file
hf_api_key = os.environ['HF_API_KEY']


# Helper function to convert a base64-encoded image (as returned by the
# course's dedicated endpoint) back into a PIL image
def base64_to_pil(img_base64):
    base64_decoded = base64.b64decode(img_base64)
    byte_stream = io.BytesIO(base64_decoded)
    pil_image = Image.open(byte_stream)
    return pil_image


# Original course version: expects the endpoint to return a base64-encoded
# image inside a JSON body (see the note above my_generate below)
def generate(prompt, negative_prompt, steps, guidance, width, height):
    params = {
        "negative_prompt": negative_prompt,
        "num_inference_steps": steps,
        "guidance_scale": guidance,
        "width": width,
        "height": height
    }
    end_point_url = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
    output = get_completion(prompt, params, end_point_url)
    pil_image = base64_to_pil(output)
    return pil_image


# Text-to-image endpoint
def get_completion(inputs, parameters=None, end_point_url=None):
    headers = {
        "Authorization": f"Bearer {hf_api_key}",
        "Content-Type": "application/json"
    }
    data = {"inputs": inputs}
    if parameters is not None:
        data.update({"parameters": parameters})
    response = requests.request("POST", end_point_url, headers=headers,
                                data=json.dumps(data))
    print("response:", response)
    print("response.content:", response.content)
    return json.loads(response.content.decode("utf-8"))


API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
headers = {"Authorization": "Bearer " + hf_api_key}


# The course uses a call to generate(), which pointed to a different endpoint;
# with that call it does not work (endpoint/function incompatibility?).
# Likely cause: get_completion() parses the response as JSON containing a
# base64-encoded image, while the public Inference API returns the raw image
# bytes directly.
# This version is based on the snippet suggested on the model page under
# "Use this model with the Inference API"; tested in simple_api_call.py.
# Extended here to forward the advanced options (assumed to be accepted
# under "parameters", in the same format get_completion uses).
def my_generate(payload, negative_prompt, steps, guidance, width, height):
    payload_input = {
        "inputs": payload,
        # Previously these slider values were silently ignored
        "parameters": {
            "negative_prompt": negative_prompt,
            "num_inference_steps": steps,
            "guidance_scale": guidance,
            "width": width,
            "height": height
        }
    }
    # print("payload:", payload_input)
    response = requests.post(API_URL, headers=headers, json=payload_input)
    # print("response.content:", response.content)
    image = Image.open(io.BytesIO(response.content))  # raw image bytes, not JSON
    return image


with gr.Blocks() as demo:
    gr.Markdown("# Image Generation with Stable Diffusion")
    with gr.Row():
        with gr.Column(scale=4):
            prompt = gr.Textbox(label="Your prompt")  # Give prompt some real estate
        with gr.Column(scale=1, min_width=50):
            btn = gr.Button("Submit")  # Submit button side by side!
    with gr.Accordion("Advanced options", open=False):  # Let's hide the advanced options!
        negative_prompt = gr.Textbox(label="Negative prompt")
        with gr.Row():
            with gr.Column():
                steps = gr.Slider(label="Inference Steps", minimum=1, maximum=100, value=25,
                                  info="In how many steps will the denoiser denoise the image?")
                guidance = gr.Slider(label="Guidance Scale", minimum=1, maximum=20, value=7,
                                     info="Controls how much the text prompt influences the result")
            with gr.Column():
                width = gr.Slider(label="Width", minimum=64, maximum=512, step=64, value=512)
                height = gr.Slider(label="Height", minimum=64, maximum=512, step=64, value=512)
    output = gr.Image(label="Result")  # Move the output up too
    btn.click(fn=my_generate,
              inputs=[prompt, negative_prompt, steps, guidance, width, height],
              outputs=[output])

demo.launch(share=False, server_port=8081)
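

# ---------------------------------------------------------------------------
# A minimal sketch (not wired into the demo above) of a more defensive request
# helper. The Inference API returns raw image bytes on success but a JSON body
# on errors (e.g. a 503 while the model is loading), which is the likely root
# of the endpoint/function incompatibility noted above: get_completion()
# always parses JSON, while Image.open() always expects image bytes. The name
# safe_generate is hypothetical, used here only for illustration.
def safe_generate(prompt):
    response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
    content_type = response.headers.get("content-type", "")
    if "application/json" in content_type:
        # Error path: surface the API's JSON message instead of letting
        # Image.open() fail on a non-image body
        raise RuntimeError(f"Inference API error: {response.json()}")
    # Success path: the body is the image itself
    return Image.open(io.BytesIO(response.content))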