dhanushreddy29 committed
Commit: 62a39be
1 Parent(s): b2868e0

Upload folder using huggingface_hub
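
For context, a folder upload like the one in this commit is typically done with the upload_folder helper from huggingface_hub. The snippet below is a minimal sketch, not taken from the commit itself; the folder path and Space id are hypothetical placeholders.

from huggingface_hub import HfApi

api = HfApi()  # assumes a Hugging Face token is already configured locally
api.upload_folder(
    folder_path=".",                        # hypothetical: local folder containing app.py
    repo_id="dhanushreddy29/<space-name>",  # hypothetical Space id
    repo_type="space",
)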

Files changed (1)
  1. app.py +181 -0
app.py ADDED
@@ -0,0 +1,181 @@
+ import os
+ import gradio as gr
+ import requests
+ from PIL import Image
+ from io import BytesIO
+ from base64 import b64encode
+
+ SEGMIND_MODEL_URL = "https://api.segmind.com/v1/inpaint-auto"
+
+ def urlToB64(imgUrl):
+     return str(b64encode(requests.get(imgUrl).content))[2:-1]  # [2:-1] strips the b'...' wrapper
+
+ def imageToB64(img):
+     buffered = BytesIO()
+     img.convert("RGB").save(buffered, format="JPEG")  # JPEG cannot store an alpha channel
+     return str(b64encode(buffered.getvalue()))[2:-1]
+
+ def generate_image(
+     upload_method,
+     img_url,
+     uploaded_img,
+     prompt,
+     negative_prompt,
+     cn_model,
+     cn_processor,
+     base_model
+ ):
+     if upload_method == "URL":
+         if not img_url:
+             raise ValueError("Image URL is required.")
+         img_b64 = urlToB64(img_url)
+     else:
+         if not uploaded_img:
+             raise ValueError("Image upload is required.")
+         img_b64 = imageToB64(uploaded_img)
+
+     data = {
+         "image": img_b64,
+         "prompt": prompt,
+         "negative_prompt": negative_prompt,
+         "samples": 1,
+         "base_model": base_model,
+         "cn_model": cn_model,
+         "cn_processor": cn_processor,
+         "scheduler": "DPM++ 2M SDE Karras",
+         "num_inference_steps": 25,
+         "guidance_scale": 7.5,
+         "seed": -1,
+         "strength": 0.9,
+         "base64": False,  # ask the API for raw image bytes rather than a base64 string
+     }
+     response = requests.post(
+         SEGMIND_MODEL_URL,
+         json=data,
+         headers={"x-api-key": os.environ['SEGMIND_API_KEY']}
+     )
+     output_img = Image.open(BytesIO(response.content))
+
+     return output_img
+
+
+ def invertBox(upload_method):
+     # Return gr.update objects with visibility settings
+     if upload_method == "URL":
+         return gr.update(visible=True), gr.update(visible=False)
+     else:
+         return gr.update(visible=False), gr.update(visible=True)
+
+ with gr.Blocks() as demo:
+     gr.Markdown("### Photo Background Changer")
+     gr.Markdown(
+         "Change the background of the image in one click to anything you can imagine"
+     )
+     with gr.Row():
+         upload_method = gr.Radio(
+             choices=["URL", "Upload"], label="Choose Image Upload Method", value="URL"
+         )
+         img_url = gr.Textbox(label="Image URL")
+         uploaded_img = gr.Image(type="pil", label="Upload Image", visible=False)
+         upload_method.change(
+             invertBox, inputs=upload_method, outputs=[img_url, uploaded_img]
+         )
+     with gr.Row():
+         prompt = gr.Textbox(label="Prompt")
+         negative_prompt = gr.Textbox(
+             label="Negative Prompt",
+             value="disfigured, deformed, ugly, floating in air, blur, haze, uneven edges, improper blending, animated, cartoon",
+         )
+     with gr.Row():
+         cn_model = gr.Dropdown(
+             label="Select Controlnet Model",
+             choices=["Canny", "Depth", "SoftEdge", "OpenPose"],
+             value="Depth",
+         )
+         cn_processor = gr.Dropdown(
+             label="Select Controlnet Processor",
+             choices=[
+                 "canny",
+                 "depth",
+                 "depth_leres",
+                 "depth_leres++",
+                 "hed",
+                 "hed_safe",
+                 "mediapipe_face",
+                 "mlsd",
+                 "normal_map",
+                 "openpose",
+                 "openpose_hand",
+                 "openpose_face",
+                 "openpose_faceonly",
+                 "openpose_full",
+                 "dw_openpose_full",
+                 "animal_openpose",
+                 "clip_vision",
+                 "revision_clipvision",
+                 "revision_ignore_prompt",
+                 "ip-adapter_clip_sd15",
+                 "ip-adapter_clip_sdxl_plus_vith",
+                 "ip-adapter_clip_sdxl",
+                 "color",
+                 "pidinet",
+                 "pidinet_safe",
+                 "pidinet_sketch",
+                 "pidinet_scribble",
+                 "scribble_xdog",
+                 "scribble_hed",
+                 "segmentation",
+                 "threshold",
+                 "depth_zoe",
+                 "normal_bae",
+                 "oneformer_coco",
+                 "oneformer_ade20k",
+                 "lineart",
+                 "lineart_coarse",
+                 "lineart_anime",
+                 "lineart_standard",
+                 "shuffle",
+                 "tile_resample",
+                 "invert",
+                 "lineart_anime_denoise",
+                 "reference_only",
+                 "reference_adain",
+                 "reference_adain+attn",
+                 "inpaint",
+                 "inpaint_only",
+                 "inpaint_only+lama",
+                 "tile_colorfix",
+                 "tile_colorfix+sharp",
+                 "recolor_luminance",
+                 "recolor_intensity",
+                 "blur_gaussian",
+                 "anime_face_segment",
+             ],
+             value="canny",
+         )
+     with gr.Row():
+         base_model = gr.Dropdown(
+             label="Select Base SD Model to use",
+             choices=["Real Vision XL", "SDXL", "Juggernaut XL", "DreamShaper XL"],
+             value="Juggernaut XL",
+         )
+     with gr.Row():
+         generate_btn = gr.Button("Generate Image")
+         output_image = gr.Image(type="pil")
+
+     generate_btn.click(
+         fn=generate_image,
+         inputs=[
+             upload_method,
+             img_url,
+             uploaded_img,
+             prompt,
+             negative_prompt,
+             cn_model,
+             cn_processor,
+             base_model
+         ],
+         outputs=[output_image],
+     )
+
+ demo.launch(debug=True)
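
generate_image reads the Segmind key from the SEGMIND_API_KEY environment variable, so that secret must be set (as a Space secret or in the shell) before demo.launch() runs. Below is a minimal local smoke test of the same function, assuming a valid key and a reachable image; the key, URL, and prompts are hypothetical placeholders.

import os

os.environ["SEGMIND_API_KEY"] = "<your-segmind-api-key>"  # hypothetical key

result = generate_image(
    "URL",                                   # upload_method
    "https://example.com/product.jpg",       # hypothetical image URL
    None,                                    # no uploaded image when using a URL
    "product photo on a marble countertop, studio lighting",
    "blurry, deformed",
    "Depth",                                 # cn_model
    "depth",                                 # cn_processor
    "Juggernaut XL",                         # base_model
)
result.save("output.jpg")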