Falln87 committed
Commit dc36510
1 Parent(s): 8564ce3

Update app.py

Files changed (1)
  1. app.py +45 -101
app.py CHANGED
@@ -1,103 +1,47 @@
 
 
  import streamlit as st
- from streamlit_aggrid import AgGrid, GridOptionsBuilder, GridUpdateMode, DataReturnMode
- from diffusers import DiffusionPipeline, StableDiffusionPipeline, DDIMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
- from datasets import load_dataset
  import torch
-
- # Set page title and favicon
- st.set_page_config(page_title="Diffusers UI", page_icon=":art:")
-
- # Sidebar for selecting models and tasks
- st.sidebar.title("Diffusers UI")
- model_type = st.sidebar.selectbox("Select Model Type", ["Text-to-Image", "Image-to-Image", "Text-to-Text"])
- task = st.sidebar.selectbox("Select Task", ["Image Generation", "Image Editing", "Text Generation"])
-
- # Load Hugging Face Hub models based on selected model type and task
- if model_type == "Text-to-Image":
-     if task == "Image Generation":
-         model_id = "CompVis/stable-diffusion-v1-4"
-     elif task == "Image Editing":
-         model_id = "runwayml/stable-diffusion-inpainting"
- elif model_type == "Image-to-Image":
-     if task == "Image Generation":
-         model_id = "CompVis/stable-diffusion-v1-4"
-     elif task == "Image Editing":
-         model_id = "CompVis/stable-diffusion-v1-4"
- elif model_type == "Text-to-Text":
-     if task == "Text Generation":
-         model_id = "gpt2"
-
- # Load model and tokenizer
- if model_type in ["Text-to-Image", "Image-to-Image"]:
-     try:
-         pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-         pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
-     except Exception as e:
-         st.error(f"Error loading model: {e}")
-         st.stop()
- elif model_type == "Text-to-Text":
-     try:
-         tokenizer = AutoTokenizer.from_pretrained(model_id)
-         model = AutoModelForCausalLM.from_pretrained(model_id)
-     except Exception as e:
-         st.error(f"Error loading model: {e}")
-         st.stop()
-
- # Main app layout
- st.title("Diffusers UI")
-
- # Input section
- with st.form("input_form"):
-     if model_type in ["Text-to-Image", "Image-to-Image"]:
-         prompt = st.text_input("Enter a prompt", "a photo of an astronaut riding a horse")
-         num_inference_steps = st.slider("Number of inference steps", 1, 50, 25)
-         guidance_scale = st.slider("Guidance scale", 1.0, 10.0, 7.5)
-         image = st.file_uploader("Upload an image (optional)", type=["png", "jpg", "jpeg"])
-     elif model_type == "Text-to-Text":
-         input_text = st.text_input("Enter input text", "Hello, my name is")
-         max_length = st.slider("Maximum length of generated text", 1, 100, 20)
-
-     submitted = st.form_submit_button("Generate")
-
- # Output section
- if submitted:
-     if model_type in ["Text-to-Image", "Image-to-Image"]:
-         if image is not None:
-             image = Image.open(image)
-             image = image.resize((768, 768))
-             image = np.array(image).astype(np.float32) / 255.0
-             image = image[None].transpose(0, 3, 1, 2)
-             image = torch.from_numpy(image)
-             image = 2.0 * image - 1.0
-             image = image.to(pipe.device)
-
-             with st.spinner("Generating image..."):
-                 try:
-                     image = pipe(prompt, image=image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]
-                 except Exception as e:
-                     st.error(f"Error generating image: {e}")
-                     st.stop()
-
-             st.image(image, caption="Generated Image", use_column_width=True)
-         else:
-             with st.spinner("Generating image..."):
-                 try:
-                     image = pipe(prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]
-                 except Exception as e:
-                     st.error(f"Error generating image: {e}")
-                     st.stop()
-
-             st.image(image, caption="Generated Image", use_column_width=True)
-     elif model_type == "Text-to-Text":
-         with st.spinner("Generating text..."):
-             try:
-                 input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(model.device)
-                 generated_ids = model.generate(input_ids, max_length=max_length)
-                 generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
-             except Exception as e:
-                 st.error(f"Error generating text: {e}")
-                 st.stop()
-
-         st.write("Generated Text:")
-         st.write(generated_text)
+ # app.py
+
  import streamlit as st
  import torch
+ from diffusers import DiffusionPipeline
+ from PIL import Image
+
+ # Set up the Streamlit app
+ st.set_page_config(page_title="Huggingface Diffusers Showcase", page_icon=":guardsman:", layout="wide")
+
+ # Add a sidebar
+ st.sidebar.title("Navigation")
+ st.sidebar.markdown("## Choose a Model")
+ model_options = ["stable-diffusion", "ddim", "ddpm"]
+ selected_model = st.sidebar.selectbox("Select a model", model_options)
+
+ # Load the selected model
+ if selected_model == "stable-diffusion":
+     pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-512-v2")
+ elif selected_model == "ddim":
+     pipe = DiffusionPipeline.from_pretrained("CompVis/ddim-512-fp16")
+ else:
+     pipe = DiffusionPipeline.from_pretrained("CompVis/ddpm-512-fp16")
+
+ # Add a text input for the user to enter a prompt
+ prompt = st.text_input("Enter a text prompt:", "A sunset over a mountain")
+
+ # Add a button to generate the image
+ if st.button("Generate Image"):
+     # Generate the image
+     image = pipe(prompt, num_inference_steps=50).images[0]
+
+     # Display the generated image
+     col1, col2 = st.columns(2)
+     with col1:
+         st.subheader("Prompt")
+         st.markdown(prompt)
+     with col2:
+         st.subheader("Generated Image")
+         st.image(image, caption="Generated using the selected diffusion model.", use_column_width=True)
+
+ # Add a section for the app info
+ st.sidebar.markdown("---")
+ st.sidebar.markdown("## App Info")
+ st.sidebar.markdown("This app showcases the Huggingface Diffusers library and its features.")
+ st.sidebar.markdown("You can select a model from the sidebar and enter a text prompt to generate an image.")
+ st.sidebar.markdown("Visit the [Huggingface Diffusers documentation](https://huggingface.co/docs/diffusers) for more information.")
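
A note on the new version, as a follow-up rather than part of the commit: the selected pipeline is re-loaded from disk on every Streamlit rerun and runs in float32 on CPU. Below is a minimal sketch of a cached, device-aware loader, assuming Streamlit >= 1.18 for st.cache_resource and a text-to-image checkpoint that actually resolves on the Hub; of the IDs above, only CompVis/stable-diffusion-v1-4 (used by the previous version of this file) is certain to exist.

# Sketch only, not the commit's code. The model ID here is an assumption;
# swap in whichever checkpoint the app settles on.
import streamlit as st
import torch
from diffusers import DiffusionPipeline

@st.cache_resource  # load the checkpoint once per process, not once per rerun
def load_pipeline(model_id: str) -> DiffusionPipeline:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype)
    return pipe.to(device)

pipe = load_pipeline("CompVis/stable-diffusion-v1-4")
image = pipe("A sunset over a mountain", num_inference_steps=50).images[0]

If the ddim/ddpm entries resolve to unconditional pipelines, they would not accept a text prompt at all, which is why the sketch assumes a text-to-image checkpoint.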