Falln87 committed
Commit 25ba393
1 Parent(s): b2a890d

Create app.py

Files changed (1): app.py (+148, -0)
app.py ADDED
import streamlit as st
import torch
from PIL import Image
from diffusers import (
    ControlNetModel,
    DDIMScheduler,
    LMSDiscreteScheduler,
    StableDiffusionControlNetPipeline,
    StableDiffusionPipeline,
)
# transformers is only needed once the Llama branch below is implemented.
from transformers import AutoModelForCausalLM, AutoTokenizer
# Define a dictionary with all available models, schedulers, features, weights, and adapters
model_dict = {
    "Stable Diffusion": {
        "Models": [
            "CompVis/stable-diffusion-v1-4",
            "stabilityai/stable-diffusion-2-1",
            "runwayml/stable-diffusion-v1-5",
            "runwayml/stable-diffusion-inpainting",
            "runwayml/stable-diffusion-video-v1-5",
            "stabilityai/stable-diffusion-2-base"
        ],
        "Schedulers": [
            "DDIMScheduler",
            "LMSDiscreteScheduler"
        ],
        "Features": [
            "Unconditional image generation",
            "Text-to-image",
            "Image-to-image",
            "Inpainting",
            "Text or image-to-video",
            "Depth-to-image"
        ],
        "Adapters": [
            "ControlNet",
            "T2I-Adapter"
        ],
        "Weights": [
            "Stable Diffusion XL",
            "SDXL Turbo",
            "Kandinsky",
            "IP-Adapter",
            "ControlNet",
            "Latent Consistency Model",
            "Textual inversion",
            "Shap-E",
            "DiffEdit",
            "Trajectory Consistency Distillation-LoRA",
            "Stable Video Diffusion",
            "Marigold Computer Vision"
        ]
    },
    "Llama": {
        "Models": [
            "decapoda-research/llama-7b-hf",
            "decapoda-research/llama-13b-hf",
            "decapoda-research/llama-30b-hf",
            "decapoda-research/llama-65b-hf"
        ],
        "Tokenizers": [
            "LlamaTokenizerFast"
        ],
        "Features": [
            "AutoPipeline",
            "Train a diffusion model",
            "Load LoRAs for inference",
            "Accelerate inference of text-to-image diffusion models",
            "LOAD PIPELINES AND ADAPTERS",
            "Load community pipelines and components",
            "Load schedulers and models",
            "Model files and layouts",
            "Load adapters",
            "Push files to the Hub",
            "GENERATIVE TASKS",
            "Unconditional image generation",
            "Text-to-image",
            "Image-to-image",
            "Inpainting",
            "Text or image-to-video",
            "Depth-to-image",
            "INFERENCE TECHNIQUES",
            "Overview",
            "Distributed inference with multiple GPUs",
            "Merge LoRAs",
            "Scheduler features",
            "Pipeline callbacks",
            "Reproducible pipelines",
            "Controlling image quality",
            "Prompt techniques",
            "ADVANCED INFERENCE",
            "Outpainting",
            "SPECIFIC PIPELINE EXAMPLES",
            "Stable Diffusion XL",
            "SDXL Turbo",
            "Kandinsky",
            "IP-Adapter",
            "ControlNet",
            "T2I-Adapter",
            "Latent Consistency Model",
            "Textual inversion",
            "Shap-E",
            "DiffEdit",
            "Trajectory Consistency Distillation-LoRA",
            "Stable Video Diffusion",
            "Marigold Computer Vision"
        ],
        "Weights": [
            "LoRA weights"
        ]
    }
}
model_type = st.selectbox("Select a model type:", list(model_dict.keys()))

if model_type == "Stable Diffusion":
    model = st.selectbox("Select a Stable Diffusion model:", model_dict[model_type]["Models"])
    scheduler = st.selectbox("Select a scheduler:", model_dict[model_type]["Schedulers"])
    feature = st.selectbox("Select a feature:", model_dict[model_type]["Features"])
    adapter = st.selectbox("Select an adapter:", model_dict[model_type]["Adapters"])
    weight = st.selectbox("Select a weight:", model_dict[model_type]["Weights"])
    # feature and weight are captured for display but not yet wired into the pipeline below.

    # Collect all inputs before the button: widgets created inside an
    # `if st.button(...)` block disappear on the rerun their own interaction triggers.
    prompt = st.text_input("Enter a prompt:")
    num_images = st.slider("Number of images to generate", min_value=1, max_value=10, value=1)

    control_image = None
    if adapter == "ControlNet":
        # ControlNet conditioning needs an extra control image (an openpose map here).
        uploaded = st.file_uploader("Upload a conditioning image:", type=["png", "jpg", "jpeg"])
        if uploaded is not None:
            control_image = Image.open(uploaded).convert("RGB")

    if st.button("Generate Images"):
        st.write("Generating images...")

        if adapter == "ControlNet" and control_image is not None:
            controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_openpose")
            pipe = StableDiffusionControlNetPipeline.from_pretrained(model, controlnet=controlnet)
        else:
            pipe = StableDiffusionPipeline.from_pretrained(model)

        # Swap in the selected scheduler via a lookup table instead of eval().
        scheduler_classes = {
            "DDIMScheduler": DDIMScheduler,
            "LMSDiscreteScheduler": LMSDiscreteScheduler,
        }
        pipe.scheduler = scheduler_classes[scheduler].from_config(pipe.scheduler.config)

        pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

        # Generate the images (the pipeline keyword is num_images_per_prompt, not num_images).
        if adapter == "ControlNet" and control_image is not None:
            images = pipe(prompt, image=control_image, num_images_per_prompt=num_images, guidance_scale=7.5).images
        else:
            images = pipe(prompt, num_images_per_prompt=num_images, guidance_scale=7.5).images

        # Display the generated images side by side.
        cols = st.columns(num_images)
        for i, image in enumerate(images):
            cols[i].image(image, caption=f"Image {i+1}", use_column_width=True)

elif model_type == "Llama":
    # Llama model implementation goes here
    # ...
    st.info("Llama support is not implemented yet.")  # placeholder so the elif body is valid Python
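The Llama branch is left empty in this commit. As a minimal sketch of what it could look like, reusing the model list above and the standard transformers text-generation API: the function name, widget labels, and sampling settings below are illustrative choices, not part of the commit, and the decapoda-research checkpoints listed in model_dict may no longer be available on the Hub.

# Hypothetical sketch of the missing Llama branch; not part of the original commit.
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def llama_branch(model_dict):
    model_name = st.selectbox("Select a Llama model:", model_dict["Llama"]["Models"])
    prompt = st.text_input("Enter a prompt:")
    max_new_tokens = st.slider("Max new tokens", min_value=16, max_value=512, value=128)

    if st.button("Generate Text") and prompt:
        st.write("Generating text...")
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            device_map="auto",  # requires accelerate, which the app already depends on
        )
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=True, temperature=0.7)
        st.write(tokenizer.decode(output_ids[0], skip_special_tokens=True))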
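One design note on the Stable Diffusion branch: Streamlit reruns the whole script on every widget interaction, so from_pretrained reloads the pipeline on each generation. A common remedy is to cache the loaded pipeline; a minimal sketch, assuming a Streamlit version that provides st.cache_resource (1.18 or later):

# Sketch: cache the heavy pipeline object across Streamlit reruns.
import streamlit as st
from diffusers import StableDiffusionPipeline

@st.cache_resource
def load_pipeline(model_name: str) -> StableDiffusionPipeline:
    # Runs once per distinct model_name; later reruns reuse the cached object.
    return StableDiffusionPipeline.from_pretrained(model_name)

Calling load_pipeline(model) in place of the direct from_pretrained call would reduce per-click latency to the sampling itself after the first run.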