Narsil HF staff commited on
Commit
6cfd1be
1 Parent(s): 93db298

Enabling xformers.

Browse files
Files changed (2) hide show
  1. app.py +119 -52
  2. requirements.txt +2 -1
app.py CHANGED
@@ -6,13 +6,19 @@ import random
6
  from PIL import Image
7
  import os
8
  import time
9
- from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
 
 
 
10
 
11
- #Loading from Diffusers Library
12
  model_id = "timbrooks/instruct-pix2pix"
13
- pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16") #, safety_checker=None)
 
 
14
  pipe.to("cuda")
15
- pipe.enable_attention_slicing()
 
16
 
17
  counter = 0
18
 
@@ -39,49 +45,85 @@ be to the input. This pipeline requires a value of at least `1`. It's possible y
39
  * Cropping the image so the face takes up a larger portion of the frame.
40
  """
41
 
42
- def chat(image_in, in_steps, in_guidance_scale, in_img_guidance_scale, image_hid, img_name, counter_out, prompt, history, progress=gr.Progress(track_tqdm=True)):
 
 
 
 
 
 
 
 
 
 
 
 
43
  progress(0, desc="Starting...")
44
- #if message == "revert": --to add revert functionality later
45
  if counter_out > 0:
46
- edited_image = pipe(prompt, image=image_hid, num_inference_steps=int(in_steps), guidance_scale=float(in_guidance_scale), image_guidance_scale=float(in_img_guidance_scale)).images[0]
47
- if os.path.exists(img_name):
48
- os.remove(img_name)
49
- temp_img_name = img_name[:-4]+str(int(time.time()))+'.png'
50
- # Create a file-like object
51
- with open(temp_img_name, "wb") as fp:
52
- # Save the image to the file-like object
53
- edited_image.save(fp)
54
- #Get the name of the saved image
55
- saved_image_name = fp.name
56
- #edited_image.save(temp_img_name) #, overwrite=True)
57
- counter_out += 1
 
 
 
 
 
 
58
  else:
59
- seed = random.randint(0, 1000000)
60
- img_name = f"./edited_image_{seed}.png"
61
- edited_image = pipe(prompt, image=image_in, num_inference_steps=int(in_steps), guidance_scale=float(in_guidance_scale), image_guidance_scale=float(in_img_guidance_scale)).images[0]
62
- if os.path.exists(img_name):
63
- os.remove(img_name)
64
- with open(img_name, "wb") as fp:
65
- # Save the image to the file-like object
66
- edited_image.save(fp)
67
- #Get the name of the saved image
68
- saved_image_name2 = fp.name
 
 
 
 
 
 
69
  history = history or []
70
- #Resizing (or not) the image for better display and adding supportive sample text
71
- add_text_list = ["There you go", "Enjoy your image!", "Nice work! Wonder what you gonna do next!", "Way to go!", "Does this work for you?", "Something like this?"]
 
 
 
 
 
 
 
72
  if counter_out > 0:
73
- response = random.choice(add_text_list) + '<img src="/file=' + saved_image_name + '">'
 
 
74
  history.append((prompt, response))
75
  return history, history, edited_image, temp_img_name, counter_out
76
  else:
77
- response = random.choice(add_text_list) + '<img src="/file=' + saved_image_name2 + '">' #IMG_NAME
 
 
78
  history.append((prompt, response))
79
  counter_out += 1
80
  return history, history, edited_image, img_name, counter_out
81
-
82
 
83
  with gr.Blocks() as demo:
84
- gr.Markdown("""<h1><center> Chat Interface with InstructPix2Pix: Give Image Editing Instructions</h1></center>
 
85
  <p>For faster inference without waiting in the queue, you may duplicate the space and upgrade to GPU in settings.<br/>
86
  <a href="https://huggingface.co/spaces/ysharma/InstructPix2Pix_Chatbot?duplicate=true">
87
  <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
@@ -89,24 +131,49 @@ with gr.Blocks() as demo:
89
  Any attempts to generate inappropriate or NSFW images will result in the display of a black screen
90
  as a precautionary measure for the protection of all users. We appreciate your cooperation in
91
  maintaining a safe and appropriate environment for all members of our community.**
92
- <p/>""")
 
93
  with gr.Row():
94
- with gr.Column():
95
- image_in = gr.Image(type='pil', label="Original Image")
96
- text_in = gr.Textbox()
97
- state_in = gr.State()
98
- b1 = gr.Button('Edit the image!')
99
- with gr.Accordion("Advance settings for Training and Inference", open=False):
100
- gr.Markdown("Advance settings for - Number of Inference steps, Guidanace scale, and Image guidance scale.")
101
- in_steps = gr.Number(label="Enter the number of Inference steps", value = 20)
102
- in_guidance_scale = gr.Slider(1,10, step=0.5, label="Set Guidance scale", value=7.5)
103
- in_img_guidance_scale = gr.Slider(1,10, step=0.5, label="Set Image Guidance scale", value=1.5)
104
- image_hid = gr.Image(type='pil', visible=False)
105
- img_name_temp_out = gr.Textbox(visible=False)
106
- counter_out = gr.Number(visible=False, value=0, precision=0)
107
- chatbot = gr.Chatbot()
108
- b1.click(chat,[image_in, in_steps, in_guidance_scale, in_img_guidance_scale, image_hid, img_name_temp_out,counter_out, text_in, state_in], [chatbot, state_in, image_hid, img_name_temp_out, counter_out]) #, queue=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
  gr.Markdown(help_text)
110
-
111
  demo.queue(concurrency_count=10)
112
- demo.launch(debug=True, width="80%", height=2000)
 
6
  from PIL import Image
7
  import os
8
  import time
9
from diffusers import (
    StableDiffusionInstructPix2PixPipeline,
    EulerAncestralDiscreteScheduler,
)

# Loading from Diffusers Library: the InstructPix2Pix editing pipeline in fp16.
model_id = "timbrooks/instruct-pix2pix"
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    revision="fp16",
)  # , safety_checker=None)
pipe.to("cuda")

# Memory-efficient attention via xformers, plus a channels-last layout for the
# UNet (the commit this diff records: "Enabling xformers.").
pipe.enable_xformers_memory_efficient_attention()
pipe.unet.to(memory_format=torch.channels_last)

counter = 0
24
 
 
45
  * Cropping the image so the face takes up a larger portion of the frame.
46
  """
47
 
48
+
49
def chat(
    image_in,
    in_steps,
    in_guidance_scale,
    in_img_guidance_scale,
    image_hid,
    img_name,
    counter_out,
    prompt,
    history,
    progress=gr.Progress(track_tqdm=True),
):
    """Run one InstructPix2Pix edit step and append the result to the chat history.

    On the first call (``counter_out == 0``) the uploaded ``image_in`` is edited
    and saved under a fresh random file name; on follow-up calls the previously
    edited ``image_hid`` is edited instead, so instructions chain, and the output
    is saved under a timestamped variant of ``img_name`` (a unique name per step).

    Parameters
    ----------
    image_in : PIL image — the originally uploaded image (first edit only).
    in_steps, in_guidance_scale, in_img_guidance_scale — inference knobs,
        coerced to int/float before being forwarded to the pipeline.
    image_hid : PIL image — previous edited output (used when counter_out > 0).
    img_name : str — path of the previously saved image (ignored on first call).
    counter_out : int — number of edits performed so far.
    prompt : str — the textual editing instruction.
    history : list[(prompt, response)] | None — chatbot history so far.

    Returns
    -------
    (history, history, edited_image, saved_path, counter_out + 1)
    """
    progress(0, desc="Starting...")
    # if message == "revert": --to add revert functionality later
    if counter_out > 0:
        # Follow-up edit: build on the previous output. Timestamp the new file
        # name so each step gets a unique path. os.path.splitext replaces the
        # fragile img_name[:-4] suffix-stripping of the original.
        source_image = image_hid
        out_name = os.path.splitext(img_name)[0] + str(int(time.time())) + ".png"
    else:
        # First edit: derive a fresh random file name and edit the upload.
        source_image = image_in
        seed = random.randint(0, 1000000)
        img_name = f"./edited_image_{seed}.png"
        out_name = img_name
    edited_image = pipe(
        prompt,
        image=source_image,
        num_inference_steps=int(in_steps),
        guidance_scale=float(in_guidance_scale),
        image_guidance_scale=float(in_img_guidance_scale),
    ).images[0]
    # Drop the previous file (or a stale file of the same name) before saving.
    if os.path.exists(img_name):
        os.remove(img_name)
    with open(out_name, "wb") as fp:
        # Save the image to the file-like object
        edited_image.save(fp)
    # Get the name of the saved image
    saved_image_name = fp.name
    history = history or []
    # Supportive sample text prepended to the rendered image.
    add_text_list = [
        "There you go",
        "Enjoy your image!",
        "Nice work! Wonder what you gonna do next!",
        "Way to go!",
        "Does this work for you?",
        "Something like this?",
    ]
    response = (
        random.choice(add_text_list) + '<img src="/file=' + saved_image_name + '">'
    )
    history.append((prompt, response))
    counter_out += 1
    return history, history, edited_image, out_name, counter_out
122
+
123
 
124
  with gr.Blocks() as demo:
125
+ gr.Markdown(
126
+ """<h1><center> Chat Interface with InstructPix2Pix: Give Image Editing Instructions</h1></center>
127
  <p>For faster inference without waiting in the queue, you may duplicate the space and upgrade to GPU in settings.<br/>
128
  <a href="https://huggingface.co/spaces/ysharma/InstructPix2Pix_Chatbot?duplicate=true">
129
  <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
 
131
  Any attempts to generate inappropriate or NSFW images will result in the display of a black screen
132
  as a precautionary measure for the protection of all users. We appreciate your cooperation in
133
  maintaining a safe and appropriate environment for all members of our community.**
134
+ <p/>"""
135
+ )
136
  with gr.Row():
137
+ with gr.Column():
138
+ image_in = gr.Image(type="pil", label="Original Image")
139
+ text_in = gr.Textbox()
140
+ state_in = gr.State()
141
+ b1 = gr.Button("Edit the image!")
142
+ with gr.Accordion(
143
+ "Advance settings for Training and Inference", open=False
144
+ ):
145
+ gr.Markdown(
146
+ "Advance settings for - Number of Inference steps, Guidanace scale, and Image guidance scale."
147
+ )
148
+ in_steps = gr.Number(
149
+ label="Enter the number of Inference steps", value=20
150
+ )
151
+ in_guidance_scale = gr.Slider(
152
+ 1, 10, step=0.5, label="Set Guidance scale", value=7.5
153
+ )
154
+ in_img_guidance_scale = gr.Slider(
155
+ 1, 10, step=0.5, label="Set Image Guidance scale", value=1.5
156
+ )
157
+ image_hid = gr.Image(type="pil", visible=False)
158
+ img_name_temp_out = gr.Textbox(visible=False)
159
+ counter_out = gr.Number(visible=False, value=0, precision=0)
160
+ chatbot = gr.Chatbot()
161
+ b1.click(
162
+ chat,
163
+ [
164
+ image_in,
165
+ in_steps,
166
+ in_guidance_scale,
167
+ in_img_guidance_scale,
168
+ image_hid,
169
+ img_name_temp_out,
170
+ counter_out,
171
+ text_in,
172
+ state_in,
173
+ ],
174
+ [chatbot, state_in, image_hid, img_name_temp_out, counter_out],
175
+ ) # , queue=True)
176
  gr.Markdown(help_text)
177
+
178
  demo.queue(concurrency_count=10)
179
+ demo.launch(debug=True, width="80%", height=2000, share=True)
requirements.txt CHANGED
@@ -1,4 +1,5 @@
1
  git+https://github.com/huggingface/diffusers.git
2
  transformers
3
  accelerate
4
- safetensors
 
 
1
  git+https://github.com/huggingface/diffusers.git
2
  transformers
3
  accelerate
4
+ safetensors
5
+ https://huggingface.co/datasets/Narsil/test/resolve/main/xformers-0.0.14.dev0-cp38-cp38-linux_x86_64.whl