Blane187 committed on
Commit 69227b5
1 Parent(s): bd85d3c

Update app.py

Files changed (1):
  1. app.py +3 -24
app.py CHANGED
@@ -1,28 +1,7 @@
-
  import gradio as gr
- from diffusers import StableDiffusionPipeline
- import torch
-
- # Load the model
- model_id = "Blane187/ai-hoshino-s1-ponyxl-lora-nochekaise"
- device = "cuda" if torch.cuda.is_available() else "cpu"
- pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16 if device == "cuda" else torch.float32)
- pipe.to(device)
 
- # Define the inference function
- def generate_image(prompt):
-     with torch.autocast(device):
-         image = pipe(prompt).images[0]
-     return image
+ model = "Blane187/ai-hoshino-s1-ponyxl-lora-nochekaise"
 
- # Create Gradio Interface
- iface = gr.Interface(
-     fn=generate_image,
-     inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
-     outputs=gr.Image(type="pil"),
-     theme="Blane187/fuchsia",
-     examples=[["ai hoshino, long hair, bangs, purple eyes, purple hair, symbol-shaped pupils,"]]
- )
 
- # Launch the interface
- iface.launch()
+ demo = gr.load(model, src="models", theme="Blane187/fuchsia")
+ demo.launch()