rezaarmand committed on
Commit
a1eec31
1 Parent(s): 80a95b1

first commit

Browse files
Files changed (3) hide show
  1. README.md +9 -9
  2. app.py +166 -0
  3. requirements.txt +6 -0
README.md CHANGED
@@ -1,13 +1,13 @@
1
  ---
2
- title: Perp Neg
3
- emoji: 🏢
4
- colorFrom: green
5
- colorTo: blue
6
  sdk: gradio
7
- sdk_version: 3.27.0
8
  app_file: app.py
9
- pinned: false
10
  license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Perp-Neg to edit and generate views using SD
3
+ emoji: 🔥
4
+ colorFrom: blue
5
+ colorTo: pink
6
  sdk: gradio
7
+ sdk_version: 3.12.0
8
  app_file: app.py
9
+ pinned: true
10
  license: apache-2.0
11
+ tags:
12
+ - making-demos
13
+ ---
app.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os

import gradio as gr
import torch
from PIL import Image
from torch import autocast

from perpneg_diffusion.perpneg_stable_diffusion.pipeline_perpneg_stable_diffusion import PerpStableDiffusionPipeline

# Select the execution device: prefer the GPU when one is available.
has_cuda = torch.cuda.is_available()
device = torch.device('cuda' if has_cuda else 'cpu')
print(device)

# Load the Perp-Neg variant of Stable Diffusion v1.4 and move it to the device.
# NOTE(review): weights are fetched from the HF hub on first run.
pipe = PerpStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
).to(device)
def dummy(images, **kwargs):
    """No-op safety checker: pass every image through, flag nothing as NSFW."""
    return (images, False)
# Replace the diffusers NSFW filter with the pass-through checker above so
# generated images are never blanked out by the pipeline.
pipe.safety_checker = dummy
# Demo presets: [prompts "a | b", weights "w1 | w2", seed, guidance scale].
# Rows come in pairs: the same prompt with and without Perp-Neg negative weights.
examples = [
    ["an armchair in the shape of an avocado | cushion in the armchair", "1 | -0.3", "145", "7.5"],
    ["an armchair in the shape of an avocado", "1", "145", "7.5"],
    ["a peacock, back view | a peacock, front view", "1 | -3.5", "30", "7.5"],
    ["a peacock, back view", "1", "30", "7.5"],
    ["A boy wearing sunglasses | a pair of sunglasses with white frame", "1 | -0.35", "200", "11"],
    ["A boy wearing sunglasses", "1", "200", "11"],
    ["a photo of an astronaut riding a horse | a jumping horse | a white horse", "1 | -0.3 | -0.1", "1988", "10"],
    ["a photo of an astronaut riding a horse | a jumping horse", "1 | -0.3", "1988", "10"],
    ["a photo of an astronaut riding a horse", "1", "1988", "10"],
]
def predict(prompt, weights, seed, scale=7.5, steps=50):
    """Run the Perp-Neg Stable Diffusion pipeline and return the generated image.

    Args:
        prompt: "|"-separated prompt list, e.g. "a peacock, back view | a peacock, front view".
        weights: "|"-separated per-prompt weights matching `prompt` (parsed by the pipeline).
        seed: RNG seed (string or int) for reproducible sampling.
        scale: classifier-free guidance scale (string or float).
        steps: number of denoising steps (string or int).

    Returns:
        The first generated PIL image, or None on failure (the error is printed
        so the Gradio UI shows an empty output instead of crashing).
    """
    try:
        with torch.no_grad():
            has_cuda = torch.cuda.is_available()
            device_type = 'cuda' if has_cuda else 'cpu'
            with autocast(device_type):
                # Bug fix: the generator was hard-coded to 'cuda', which raised
                # on CPU-only hosts; seed it on whichever device is in use.
                generator = torch.Generator(device_type).manual_seed(int(seed))
                image_perpneg = pipe(prompt, guidance_scale=float(scale), generator=generator,
                                     num_inference_steps=int(steps), weights=weights)["images"][0]
                return image_perpneg
    except Exception as e:  # best-effort demo endpoint: log and return nothing
        print(e)
        return None
# ---------------------------------------------------------------------------
# Gradio UI: prompt / weight / seed / scale inputs on the left, generated
# image on the right, plus clickable example presets wired to `predict`.
# ---------------------------------------------------------------------------
app = gr.Blocks()
with app:
    gr.Markdown(
        "# **<p align='center'>Perp-Neg: Iterative Editing and Robust View Generation.</p>**"
    )
    gr.Markdown(
        """
        ### **<p align='center'>Demo created by Huangjie Zheng and Reza Armandpour</p>**.
        """
    )

    with gr.Row():
        with gr.Column():
            # Typo fix: label was "FUll prompt".
            with gr.Tab(label="Full prompt"):
                gr.Markdown(
                    "### **Provide a list of prompts and their weights separated by | **"
                )
                prompt = gr.Textbox(label="List of prompts:", show_label=True)
                weights = gr.Textbox(label="List of weights:", show_label=True)
                seed = gr.Textbox(label="Seed:", show_label=True)
                scale = gr.Textbox(label="Guidance scale:", show_label=True)
                image_gen_btn = gr.Button(value="Generate")

        with gr.Column():
            img_output = gr.Image(
                label="Generated Image",
                show_label=True,
            )

    # Clicking an example row fills the inputs and runs `predict` live
    # (cache_examples=False: nothing is precomputed at startup).
    gr.Markdown("**Examples:**")
    gr.Examples(
        examples,
        [prompt, weights, seed, scale],
        [img_output],
        fn=predict,
        cache_examples=False,
    )

    image_gen_btn.click(
        predict,
        inputs=[prompt, weights, seed, scale],
        outputs=[img_output],
    )

    # Markup fix: footer contained a stray unmatched "</a>" tag.
    gr.Markdown(
        """
        \n Demo created by: Huangjie Zheng and Reza Armandpour.
        """
    )

app.launch()
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ gradio
2
+ torch
3
+ transformers @ git+https://github.com/huggingface/transformers.git@799cea64ac1029d66e9e58f18bc6f47892270723
4
+ git+https://github.com/Perp-Neg/Perp-Neg-stablediffusion.git
5
+ diffusers
6
+ accelerate