function definition
app.py CHANGED
@@ -83,7 +83,6 @@ class Model:
         ).images[0]
         return image

-    @spaces.GPU
     def run(
         self,
         image: dict[str, PIL.Image.Image],
@@ -107,6 +106,12 @@ class Model:
         image = self.inference(raw_data, seed)
         return image

+model = Model()
+
+@spaces.GPU
+def generate_image(image, keyword, image2, keyword2, text, seed):
+    return model.run(image, keyword, image2, keyword2, text, seed)
+

 def create_demo():
     USAGE = """## To run the demo, you should:
@@ -120,14 +125,11 @@ def create_demo():
     4. Click the Run button.
     """

-    model = Model()
-
     with gr.Blocks() as demo:
         gr.HTML(
             """<h1 style="text-align: center;"><b><i>λ-ECLIPSE</i>: Multi-Concept Personalized Text-to-Image Diffusion Models by Leveraging CLIP Latent Space</b></h1>
             <h1 style='text-align: center;'><a href='https://eclipse-t2i.github.io/Lambda-ECLIPSE/'>Project Page</a> | <a href='#'>Paper</a></h1>

-            <p style="text-align: center; color: red;">This demo is currently hosted on either a small GPU or CPU. We will soon provide high-end GPU support.</p>
             <p style="text-align: center; color: red;">Please follow the instructions from here to run it locally: <a href="https://github.com/eclipse-t2i/lambda-eclipse-inference">GitHub Inference Code</a></p>

             <a href="https://colab.research.google.com/drive/1VcqzXZmilntec3AsIyzCqlstEhX4Pa1o?usp=sharing" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
@@ -244,11 +246,11 @@ def create_demo():
                 ],
             ],
             inputs=inputs,
-            fn=
+            fn=generate_image,
             outputs=result,
         )

-    run_button.click(fn=
+    run_button.click(fn=generate_image, inputs=inputs, outputs=result)
     return demo

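Read as straight code, the change is simple: `@spaces.GPU` moves off the `Model.run` method and onto a new module-level wrapper, `generate_image`, which both `gr.Examples` and the Run button now call. A minimal sketch of the resulting layout in app.py, assuming the `spaces` package from Hugging Face ZeroGPU (the `Model` internals shown here are a placeholder, not the real inference code):

import spaces  # ZeroGPU helper; app.py already imports it


class Model:
    def run(self, image, keyword, image2, keyword2, text, seed):
        ...  # placeholder for the CLIP-latent inference in app.py


# Built once at import time, before any GPU is attached.
model = Model()


@spaces.GPU  # ZeroGPU attaches a GPU only while this call runs
def generate_image(image, keyword, image2, keyword2, text, seed):
    # Module-level wrapper passed as fn= to gr.Examples and run_button.click.
    return model.run(image, keyword, image2, keyword2, text, seed)

Keeping the decorated callable a plain top-level function, rather than a bound method, matches the usual ZeroGPU pattern and gives Gradio a single fn= target for both the examples and the button.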