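# Gradio demo: Photo Background Changer.
# Sends a user-supplied image to Segmind's inpaint-auto API, along with the prompt,
# ControlNet model/processor and base SDXL checkpoint chosen in the UI, and shows the result.
# The SEGMIND_API_KEY environment variable must be set for the API call to succeed.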
import os
import gradio as gr
import requests
from PIL import Image
from io import BytesIO
from base64 import b64encode
SEGMIND_MODEL_URL = "https://api.segmind.com/v1/inpaint-auto"

def urlToB64(imgUrl):
    # Fetch the image from the URL and return it as a base64 string
    return b64encode(requests.get(imgUrl).content).decode("utf-8")

def imageToB64(img):
    # Serialize a PIL image to JPEG in memory and return it as a base64 string
    buffered = BytesIO()
    img.save(buffered, format="JPEG")
    return b64encode(buffered.getvalue()).decode("utf-8")

def generate_image(
    upload_method,
    img_url,
    uploaded_img,
    prompt,
    negative_prompt,
    cn_model,
    cn_processor,
    base_model,
):
    # Base64-encode the input image from whichever source the user selected
    if upload_method == "URL":
        if not img_url:
            raise ValueError("Image URL is required.")
        img_b64 = urlToB64(img_url)
    else:
        if uploaded_img is None:
            raise ValueError("Image upload is required.")
        img_b64 = imageToB64(uploaded_img)
    # Request payload for the Segmind inpaint-auto endpoint
    data = {
        "image": img_b64,
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "samples": 1,
        "base_model": base_model,
        "cn_model": cn_model,
        "cn_processor": cn_processor,
        "scheduler": "DPM++ 2M SDE Karras",
        "num_inference_steps": 25,
        "guidance_scale": 7.5,
        "seed": -1,
        "strength": 0.9,
        "base64": False,
    }
    response = requests.post(
        SEGMIND_MODEL_URL,
        json=data,
        headers={"x-api-key": os.environ["SEGMIND_API_KEY"]},
    )
    # Fail loudly on API errors instead of trying to decode an error body as an image
    response.raise_for_status()
    output_img = Image.open(BytesIO(response.content))
    return output_img
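
# Example of calling generate_image directly (outside the Gradio UI), kept as a comment so it
# does not run at import time; the URL and prompts below are placeholders, not values from the app:
#
#   img = generate_image(
#       "URL", "https://example.com/photo.jpg", None,
#       "product photo on a marble countertop", "blurry, low quality",
#       "Depth", "depth", "Juggernaut XL",
#   )
#   img.save("background_changed.jpg")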

def invertBox(upload_method):
    # Return gr.update objects that toggle visibility of the URL box and the upload widget
    if upload_method == "URL":
        return gr.update(visible=True), gr.update(visible=False)
    else:
        return gr.update(visible=False), gr.update(visible=True)

with gr.Blocks() as demo:
    gr.Markdown("### Photo Background Changer")
    gr.Markdown(
        "Change the background of an image to anything you can imagine, in one click."
    )
    with gr.Row():
        upload_method = gr.Radio(
            choices=["URL", "Upload"], label="Choose Image Upload Method", value="URL"
        )
        img_url = gr.Textbox(label="Image URL")
        uploaded_img = gr.Image(type="pil", label="Upload Image", visible=False)
    # Show either the URL textbox or the upload widget, depending on the selected method
    upload_method.change(
        invertBox, inputs=upload_method, outputs=[img_url, uploaded_img]
    )
    with gr.Row():
        prompt = gr.Textbox(label="Prompt")
        negative_prompt = gr.Textbox(
            label="Negative Prompt",
            value="disfigured, deformed, ugly, floating in air, blur, haze, uneven edges, improper blending, animated, cartoon",
        )
    with gr.Row():
        cn_model = gr.Dropdown(
            label="Select Controlnet Model",
            choices=["Canny", "Depth", "SoftEdge", "OpenPose"],
            value="Depth",
        )
        cn_processor = gr.Dropdown(
            label="Select Controlnet Processor",
            choices=[
                "canny",
                "depth",
                "depth_leres",
                "depth_leres++",
                "hed",
                "hed_safe",
                "mediapipe_face",
                "mlsd",
                "normal_map",
                "openpose",
                "openpose_hand",
                "openpose_face",
                "openpose_faceonly",
                "openpose_full",
                "dw_openpose_full",
                "animal_openpose",
                "clip_vision",
                "revision_clipvision",
                "revision_ignore_prompt",
                "ip-adapter_clip_sd15",
                "ip-adapter_clip_sdxl_plus_vith",
                "ip-adapter_clip_sdxl",
                "color",
                "pidinet",
                "pidinet_safe",
                "pidinet_sketch",
                "pidinet_scribble",
                "scribble_xdog",
                "scribble_hed",
                "segmentation",
                "threshold",
                "depth_zoe",
                "normal_bae",
                "oneformer_coco",
                "oneformer_ade20k",
                "lineart",
                "lineart_coarse",
                "lineart_anime",
                "lineart_standard",
                "shuffle",
                "tile_resample",
                "invert",
                "lineart_anime_denoise",
                "reference_only",
                "reference_adain",
                "reference_adain+attn",
                "inpaint",
                "inpaint_only",
                "inpaint_only+lama",
                "tile_colorfix",
                "tile_colorfix+sharp",
                "recolor_luminance",
                "recolor_intensity",
                "blur_gaussian",
                "anime_face_segment",
            ],
            value="canny",
        )
    with gr.Row():
        base_model = gr.Dropdown(
            label="Select Base SD Model to use",
            choices=["Real Vision XL", "SDXL", "Juggernaut XL", "DreamShaper XL"],
            value="Juggernaut XL",
        )
    with gr.Row():
        generate_btn = gr.Button("Generate Image")
        output_image = gr.Image(type="pil")
    generate_btn.click(
        fn=generate_image,
        inputs=[
            upload_method,
            img_url,
            uploaded_img,
            prompt,
            negative_prompt,
            cn_model,
            cn_processor,
            base_model,
        ],
        outputs=[output_image],
    )

demo.launch(debug=True)