ysharma HF staff committed
Commit 91ce201
1 Parent(s): bf46a4e
Files changed (1)
  app.py +22 -71
app.py CHANGED
@@ -1,43 +1,3 @@
- """import gradio as gr
- import PIL
- from PIL import Image
-
-
-
- def asis(img):
-     # Open an image
-     #img = Image.open("example.jpg")
-     # Get the original size of the image
-     original_size = img.size
-     # Calculate the new size of the image
-     new_size = (int(original_size[0]/2), int(original_size[1]/2))
-     # Resize the image
-     img1 = img.resize(new_size)
-     img2 = img.resize(new_size, resample=Image.LANCZOS)
-     # Save the resized image
-     #img.save("resized_example.jpg")
-     return img, img1, img2
-
- with gr.Blocks() as demo:
-     img_in = gr.Image(type='pil') #, shape=(512,512))
-     with gr.Row():
-         img_out = gr.Image(type='pil', label='as is') # ,shape=(512,512))
-         img_out1 = gr.Image(type='pil', label='resizing to half') # ,shape=(512,512))
-         img_out2 = gr.Image(type='pil', label='resize with resample') # ,shape=(512,512))
-     #with gr.Row():
-     #    with gr.Column():
-     #        image_in = gr.Image(type='pil', label="Original Image")
-     #        text_in = gr.Textbox()
-     #        state_in = gr.State()
-     b1 = gr.Button('Run')
-     # chatbot = gr.Chatbot()
-     b1.click(asis, img_in, [img_out, img_out1, img_out2])
-
-     #demo.queue(concurrency_count=10)
-     demo.launch(debug=True) # width="80%", height=1500)
- """
-
-
  import PIL
  import requests
  import torch
@@ -57,35 +17,40 @@ pipe.enable_attention_slicing()
  counter = 0
 
 
- help_text = """ """
+ help_text = """ Some notes from the official [instruct-pix2pix](https://huggingface.co/spaces/timbrooks/instruct-pix2pix) Space by the authors and from the official [Diffusers docs](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/pix2pix) -
+ If you're not getting what you want, there may be a few reasons:
+ 1. Is the image not changing enough? Your guidance_scale may be too low. It should be >1. A higher guidance scale encourages the model to generate images
+ that are closely linked to the text `prompt`, usually at the expense of lower image quality. This value dictates how similar the output should
+ be to the input. This pipeline requires a value of at least `1`. It's possible your edit requires larger changes from the original image.
+
+ 2. Alternatively, you can toggle image_guidance_scale. Image guidance scale pushes the generated image towards the initial image, and
+ is enabled by setting `image_guidance_scale > 1`. A higher image guidance scale encourages the model to generate images that are closely
+ linked to the source image `image`, usually at the expense of lower image quality.
+ 3. I have observed that rephrasing the instruction sometimes improves results (e.g., "turn him into a dog" vs. "make him a dog" vs. "as a dog").
+ 4. Increasing the number of steps sometimes improves results.
+ 5. Do faces look weird? The Stable Diffusion autoencoder has a hard time with faces that are small in the image. Try:
+ * Cropping the image so the face takes up a larger portion of the frame.
+ """
 
  def previous(image):
      return image
 
  def chat(image_in, in_steps, in_guidance_scale, in_img_guidance_scale, image_hid, img_name, counter_out, image_oneup, prompt, history, progress=gr.Progress(track_tqdm=True)):
      progress(0, desc="Starting...")
-     if prompt == 'reverse' : #--to add revert functionality later
+     if prompt.lower() == 'reverse' : #--to add revert functionality later
          history = history or []
-         #Resizing (or not) the image for better display and adding supportive sample text
-         #add_text_list = ["There you go", "Enjoy your image!", "Nice work! Wonder what you gonna do next!", "Way to go!", "Does this work for you?", "Something like this?"]
-         #if counter_out > 0:
          temp_img_name = img_name[:-4]+str(int(time.time()))+'.png'
          image_oneup.save(temp_img_name)
          response = 'Reverted to the last image ' + '<img src="/file=' + temp_img_name + '">'
          history.append((prompt, response))
          return history, history, image_oneup, temp_img_name, counter_out
-     if prompt == 'restart' : #--to add revert functionality later
+     if prompt.lower() == 'restart' : #--to add revert functionality later
          history = history or []
-         #Resizing (or not) the image for better display and adding supportive sample text
-         #add_text_list = ["There you go", "Enjoy your image!", "Nice work! Wonder what you gonna do next!", "Way to go!", "Does this work for you?", "Something like this?"]
-         #if counter_out > 0:
          temp_img_name = img_name[:-4]+str(int(time.time()))+'.png'
          image_in.save(temp_img_name)
          response = 'Reverted to the last image ' + '<img src="/file=' + temp_img_name + '">'
          history.append((prompt, response))
          return history, history, image_in, temp_img_name, counter_out
-     # Save the resized image
-     #img.save("resized_example.jpg", optimize=True, quality=95
      if counter_out > 0:
          edited_image = pipe(prompt, image=image_hid, num_inference_steps=int(in_steps), guidance_scale=float(in_guidance_scale), image_guidance_scale=float(in_img_guidance_scale)).images[0]
          if os.path.exists(img_name):
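Note: the new help_text documents how guidance_scale and image_guidance_scale steer InstructPix2Pix. For readers following the diff, here is a minimal, self-contained sketch of the edit call that chat() wraps. The from_pretrained setup is elided from this diff, so the checkpoint id (taken from the Space linked in help_text) and the input filename are assumptions; the parameter values mirror the UI defaults below (20 steps, 7.5, 1.5).

    import torch
    from PIL import Image
    from diffusers import StableDiffusionInstructPix2PixPipeline

    # Assumed setup: the public InstructPix2Pix checkpoint in half precision.
    pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
        "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
    ).to("cuda")
    pipe.enable_attention_slicing()  # lower peak VRAM use, as in this app

    image = Image.open("input.png").convert("RGB")  # illustrative input
    edited = pipe(
        "turn him into a dog",      # the edit instruction
        image=image,
        num_inference_steps=20,
        guidance_scale=7.5,         # >1 pulls the output toward the prompt
        image_guidance_scale=1.5,   # >1 pulls the output toward the input image
    ).images[0]
    edited.save("edited.png")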
@@ -102,17 +67,11 @@ def chat(image_in, in_steps, in_guidance_scale, in_img_guidance_scale, image_hid
      else:
          seed = random.randint(0, 1000000)
          img_name = f"./edited_image_{seed}.png"
+         #Resizing the image
          basewidth = 512
          wpercent = (basewidth/float(image_in.size[0]))
          hsize = int((float(image_in.size[1])*float(wpercent)))
          image_in = image_in.resize((basewidth,hsize), Image.Resampling.LANCZOS)
-         # Get the original size of the image
-         #original_size = image_in.size
-         # Calculate the new size of the image
-         #new_size = (int(original_size[0]/2), int(original_size[1]/2))
-         # Resize the image
-         #img1 = img.resize(new_size)
-         #image_in = image_in.resize(new_size,Image.ANTIALIAS) # resample=Image.LANCZOS)
          edited_image = pipe(prompt, image=image_in, num_inference_steps=int(in_steps), guidance_scale=float(in_guidance_scale), image_guidance_scale=float(in_img_guidance_scale)).images[0]
          if os.path.exists(img_name):
              os.remove(img_name)
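Note: the resize in this hunk normalizes a first-time input to a 512 px width and scales the height by the same factor, so the aspect ratio is preserved; LANCZOS gives a higher-quality downscale than the default filter. The same logic as a standalone helper (the function name is illustrative):

    from PIL import Image

    def resize_to_width(img: Image.Image, basewidth: int = 512) -> Image.Image:
        # Scale factor that makes the output width exactly basewidth.
        wpercent = basewidth / float(img.size[0])
        hsize = int(float(img.size[1]) * wpercent)
        # LANCZOS resampling keeps edges crisp when shrinking.
        return img.resize((basewidth, hsize), Image.Resampling.LANCZOS)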
@@ -134,17 +93,10 @@ def chat(image_in, in_steps, in_guidance_scale, in_img_guidance_scale, image_hid
      counter_out += 1
      return history, history, edited_image, img_name, counter_out
 
+
 
  with gr.Blocks() as demo:
-     gr.Markdown("""<h1><center> Chat Interface with InstructPix2Pix: Give Image Editing Instructions</center></h1>
-     <p>For faster inference without waiting in the queue, you may duplicate the space and upgrade to GPU in settings.<br/>
-     <a href="https://huggingface.co/spaces/ysharma/InstructPix2Pix_Chatbot?duplicate=true">
-     <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
-     **Note: Please be advised that a safety checker has been implemented in this public space.
-     Any attempts to generate inappropriate or NSFW images will result in the display of a black screen
-     as a precautionary measure for the protection of all users. We appreciate your cooperation in
-     maintaining a safe and appropriate environment for all members of our community.**
-     </p>""")
+     gr.Markdown("""<h1><center>dummy</center></h1> """)
      with gr.Row():
          with gr.Column():
              image_in = gr.Image(type='pil', label="Original Image")
@@ -158,8 +110,8 @@ with gr.Blocks() as demo:
              in_steps = gr.Number(label="Enter the number of Inference steps", value=20)
              in_guidance_scale = gr.Slider(1, 10, step=0.5, label="Set Guidance scale", value=7.5)
              in_img_guidance_scale = gr.Slider(1, 10, step=0.5, label="Set Image Guidance scale", value=1.5)
-             image_hid = gr.Image(type='pil', visible=True)
-             image_oneup = gr.Image(type='pil', visible=True)
+             image_hid = gr.Image(type='pil', visible=False)
+             image_oneup = gr.Image(type='pil', visible=False)
              img_name_temp_out = gr.Textbox(visible=False)
              #img_revert = gr.Checkbox(visible=True, value=False, label="to track a revert message")
              counter_out = gr.Number(visible=False, value=0, precision=0)
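Note: flipping image_hid and image_oneup to visible=False hides them from the layout while they keep round-tripping values through the click handler; image_hid carries the latest edit and image_oneup the one before it, which is what the 'reverse' command restores. A minimal sketch of that hidden-component pattern, with stand-in names and a simplified handler rather than the app's full wiring:

    import gradio as gr

    def step(new_img, latest, _previous):
        # Shift the one-step history: the fresh image becomes "latest",
        # the old "latest" becomes "previous".
        return new_img, latest

    with gr.Blocks() as demo:
        img_in = gr.Image(type='pil', label="Original Image")
        latest = gr.Image(type='pil', visible=False)    # role of image_hid
        previous = gr.Image(type='pil', visible=False)  # role of image_oneup
        gr.Button('Run').click(step, [img_in, latest, previous], [latest, previous])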
@@ -170,5 +122,4 @@ with gr.Blocks() as demo:
      gr.Markdown(help_text)
 
  demo.queue(concurrency_count=10)
- demo.launch(debug=True, width="80%", height=2000)
-
+ demo.launch(debug=True, width="80%", height=2000)