ruslanmv committed on
Commit
a5a0634
1 Parent(s): 7bb84aa

Add application file

Browse files
Files changed (4) hide show
  1. .gitignore +3 -0
  2. README.md +2 -2
  3. app.py +96 -0
  4. requirements.txt +24 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ venv/
2
+ __pycache__/
3
+ *.py[cod]
README.md CHANGED
@@ -1,10 +1,10 @@
1
  ---
2
- title: Idea To Image SDXL
3
  emoji: 🐠
4
  colorFrom: blue
5
  colorTo: red
6
  sdk: gradio
7
- sdk_version: 4.14.0
8
  app_file: app.py
9
  pinned: false
10
  ---
 
1
  ---
2
+ title: Idea to Image SDXL
3
  emoji: 🐠
4
  colorFrom: blue
5
  colorTo: red
6
  sdk: gradio
7
+ sdk_version: 4.7.1
8
  app_file: app.py
9
  pinned: false
10
  ---
app.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Created by ruslanmv.com
2
+ ## Happy coding!
3
+
4
+ import gradio as gr
5
+ import torch
6
+ import modin.pandas as pd
7
+ import numpy as np
8
+ from diffusers import DiffusionPipeline
9
+ from transformers import pipeline
10
+
11
# Prompt-extension model: expands a short user idea into a detailed
# Stable Diffusion prompt (loaded once at import time, CPU by default).
pipe = pipeline("text-generation", model="daspartho/prompt-extend")
12
+
13
+
14
def extend_prompt(prompt):
    """Return *prompt* expanded into a richer text-to-image prompt.

    Feeds the idea (with a trailing comma as a continuation cue) to the
    prompt-extend text-generation pipeline and returns the single
    generated sequence.
    """
    seeded = f"{prompt},"
    candidates = pipe(seeded, num_return_sequences=1)
    return candidates[0]["generated_text"]
16
+
17
+
18
def text_it(inputs):
    """Thin Gradio-facing wrapper around :func:`extend_prompt`."""
    extended = extend_prompt(inputs)
    return extended
20
+
21
+
22
# Local download/cache directory for the SDXL-Turbo weights, so the model
# is not re-downloaded to the default HF cache on every fresh environment.
custom_cache_dir = "./.cache/stabilityai/sdxl-turbo"
23
+
24
+
25
def load_pipeline(use_cuda):
    """Load the SDXL-Turbo diffusion pipeline on the requested device.

    Args:
        use_cuda: request GPU inference; silently falls back to CPU when
            CUDA is not actually available.

    Returns:
        A ``DiffusionPipeline`` already moved to the selected device
        (fp16 + xformers attention on GPU, default precision on CPU).

    Note: the model is reloaded on every call; callers that invoke this
    per-request pay the full load cost each time.
    """
    device = "cuda" if use_cuda and torch.cuda.is_available() else "cpu"
    # Local name deliberately distinct from the module-level `pipe`
    # (the transformers text-generation pipeline) to avoid shadowing.
    if device == "cuda":
        # Drop cached allocator blocks before loading the large model.
        # (The original also called torch.cuda.max_memory_allocated(),
        # which only *reads* a counter and had no effect — removed.)
        torch.cuda.empty_cache()
        sd_pipe = DiffusionPipeline.from_pretrained(
            "stabilityai/sdxl-turbo",
            torch_dtype=torch.float16,  # fp16 halves GPU memory use
            variant="fp16",
            use_safetensors=True,
            cache_dir=custom_cache_dir,
        )
        sd_pipe.enable_xformers_memory_efficient_attention()
        sd_pipe = sd_pipe.to(device)
        torch.cuda.empty_cache()
    else:
        sd_pipe = DiffusionPipeline.from_pretrained(
            "stabilityai/sdxl-turbo", use_safetensors=True, cache_dir=custom_cache_dir
        )
        sd_pipe = sd_pipe.to(device)
    return sd_pipe
46
+
47
+
48
def genie(prompt="sexy woman", steps=2, seed=398231747038484200, use_cuda=False):
    """Generate an image from a short idea.

    The idea is first expanded by the prompt-extend model, then rendered
    with SDXL-Turbo.

    Args:
        prompt: short idea text to expand and render.
        steps: number of inference steps (SDXL-Turbo needs very few).
        seed: RNG seed; 0 means "let the pipeline pick a random seed".
        use_cuda: run the diffusion pipeline on GPU when available.

    Returns:
        Tuple of (PIL image, extended prompt text).
    """
    sd_pipe = load_pipeline(use_cuda)
    # Bug fix: the original used `np.random.seed(0)` for the seed==0 case,
    # which returns None — a misleading side-effecting call. An explicit
    # None generator tells diffusers to draw a random seed itself.
    generator = None if seed == 0 else torch.manual_seed(seed)
    extended_prompt = extend_prompt(prompt)
    int_image = sd_pipe(
        prompt=extended_prompt,
        generator=generator,
        num_inference_steps=steps,
        guidance_scale=0.0,  # SDXL-Turbo is trained for guidance-free sampling
    ).images[0]
    return int_image, extended_prompt
59
+
60
+
61
# --- Gradio UI -------------------------------------------------------------
# A Blocks layout defines the widgets and wires the Generate button to
# genie(); a gr.Interface built from the same components is what actually
# gets launched (with the example row below).
with gr.Blocks() as myface:
    gr.HTML()
    with gr.Row():
        with gr.Row():
            input_text = gr.Textbox(label="Prompt idea.", lines=1)
            steps_slider = gr.Slider(
                1, maximum=5, value=2, step=1, label="Number of Iterations"
            )
            seed_slider = gr.Slider(
                minimum=0, step=1, maximum=999999999999999999, randomize=True
            )
            cuda_checkbox = gr.Checkbox(label="cuda", info="Do you have cuda?")
        with gr.Row():
            generate_button = gr.Button("Generate")
        with gr.Row():
            output_image = gr.Image()
            output_text = gr.Textbox(label="Generated Text", lines=2)
    generate_button.click(
        genie,
        inputs=[input_text, steps_slider, seed_slider, cuda_checkbox],
        outputs=[output_image, output_text],
        concurrency_limit=10,
    )

# Example row for the Interface. Bug fix: the fourth value feeds the cuda
# checkbox and must be a bool — the original passed "" (invalid for
# gr.Checkbox); False preserves the intended "CPU by default" example.
example = [["sexy woman", 2, 398231747038484200, False]]

with gr.Interface(
    fn=genie,
    inputs=[input_text, steps_slider, seed_slider, cuda_checkbox],
    outputs=[output_image, output_text],
    title="Stable Diffusion Turbo CPU or GPU",
    description="Type your idea, and let's create a detailed image.",
    examples=example,
) as iface:
    iface.launch(inline=True, show_api=False, max_threads=200)
requirements.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ diffusers==0.23.1
2
+ transformers
3
+ gradio==4.7.1
4
+ --extra-index-url https://download.pytorch.org/whl/cu121
5
+ torch==2.1.0
6
+ fastapi==0.104.0
7
+ uvicorn==0.23.2
8
+ Pillow==10.1.0
9
+ accelerate==0.24.0
10
+ compel==2.0.2
11
+ controlnet-aux==0.0.7
12
+ peft==0.6.0
13
+ xformers
14
+ hf_transfer
15
+ ipykernel
16
+ notebook
17
+ jupyterlab
18
+ ipywidgets
19
+ # Install the triton package manually from the local wheel file:
20
+ ./whl/triton-2.0.0-cp310-cp310-win_amd64.whl
21
+
22
+ modin[all]
23
+ invisible_watermark
24
+ ftfy