faalbane committed on
Commit 1b9bdb6 · 1 Parent(s): e932803

Update app.py

Files changed (1)
  1. app.py +13 -45
app.py CHANGED
@@ -1,38 +1,13 @@
 
-# import gradio as gr
-# gr.Interface.load("models/faalbane/kopper-kreations-custom-sd-v-2-1-style-v2").launch()
-
-#@title Install and import requirements
-!pip install -qqq diffusers==0.11.1 transformers gradio ftfy accelerate
-
-import diffusers
-import gradio
+import gradio as gr
 from PIL import Image
-def image_grid(imgs, rows, cols):
-    assert len(imgs) == rows*cols
-
-    w, h = imgs[0].size
-    grid = Image.new('RGB', size=(cols*w, rows*h))
-    grid_w, grid_h = grid.size
-
-    for i, img in enumerate(imgs):
-        grid.paste(img, box=(i%cols*w, i//cols*h))
-    return grid
-
-#@title Load the model from the [Concepts Library](https://huggingface.co/sd-dreambooth-library). If you are new to Stable Diffusion, make sure you [read the LICENSE](https://github.com/CompVis/stable-diffusion/blob/main/LICENSE)
-#@markdown You may also use a locally trained model by replacing the `model_id` to a path with the model locally or on Google Drive
-from torch import autocast
-from diffusers import StableDiffusionPipeline
+import diffusers
 import torch
+from diffusers import StableDiffusionPipeline
 
-model_id = "models/faalbane/kopper-kreations-custom-sd-v-2-1-model-object-copper-heart" #@param {type:"string"}
-pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
-
-#@title Run the Stable Diffusion pipeline with interactive UI Demo on Gradio
-#@markdown Run this cell to get a Gradio UI like this to run your models
-
-#@markdown ![](https://i.imgur.com/bxHfawQ.png)
-import gradio as gr
+# Load the model
+model_id = "faalbane/kopper-kreations-custom-sd-v-2-1-model-object-copper-heart"
+pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
 
 def inference(prompt, num_samples):
     all_images = []
@@ -41,18 +16,11 @@ def inference(prompt, num_samples):
     all_images.extend(images)
     return all_images
 
-with gr.Blocks() as demo:
-    gr.HTML("<h2 style=\"font-size: 2em; font-weight: bold\" align=\"center\">Stable Diffusion Dreambooth - Run Concept</h2>")
-    with gr.Row():
-        with gr.Column():
-            prompt = gr.Textbox(label="prompt")
-            samples = gr.Slider(label="Samples",value=1)
-            run = gr.Button(value="Run")
-        with gr.Column():
-            gallery = gr.Gallery(show_label=False)
-
-    run.click(inference, inputs=[prompt,samples], outputs=gallery)
-    # gr.Examples([["a photo of sks toy riding a bicycle", 1,1]], [prompt,samples], gallery, inference, cache_examples=False)
-
+# Create Gradio interface
+iface = gr.Interface(
+    fn=inference,
+    inputs=["textbox", "slider"],
+    outputs="gallery",
+)
 
-demo.launch(debug=True)
+iface.launch()
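For reference, the full `app.py` produced by this commit reduces to roughly the sketch below. The diff elides the body of `inference` between the two hunks, so the pipeline call shown here (the `num_images_per_prompt` argument and the `int(...)` cast on the slider value) is an assumption patterned on the removed Colab/Blocks version; everything else mirrors the added lines.

```python
import gradio as gr
from PIL import Image
import diffusers
import torch
from diffusers import StableDiffusionPipeline

# Load the model (repository name taken from the added lines above)
model_id = "faalbane/kopper-kreations-custom-sd-v-2-1-model-object-copper-heart"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

def inference(prompt, num_samples):
    all_images = []
    # Assumed body: the diff does not show these lines. A typical call generates
    # `num_samples` images for the prompt; the slider passes a number, hence int().
    images = pipe(prompt, num_images_per_prompt=int(num_samples)).images
    all_images.extend(images)
    return all_images

# Create Gradio interface
iface = gr.Interface(
    fn=inference,
    inputs=["textbox", "slider"],
    outputs="gallery",
)

iface.launch()
```

The string shortcuts `"textbox"` and `"slider"` give Gradio's default components, so the sample count arrives as a plain number with the default slider range; explicit `gr.Textbox(...)` and `gr.Slider(...)` components would be the usual way to set labels and bounds, as the removed Blocks UI did.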