Zai committed on
Commit
761d68b
1 Parent(s): 6bed04f

build first

__pycache__/choices.cpython-310.pyc ADDED
Binary file (284 Bytes).
 
__pycache__/functions.cpython-310.pyc ADDED
Binary file (929 Bytes).
 
app.py ADDED
@@ -0,0 +1,55 @@
+ import streamlit as st
+ from functions import generate_controlnet_image, generate_image, generate_video
+ from choices import text_to_image_models, controlnet_models, image_to_video_models
+
+
+ def text_to_image(input_text, selected_model):
+     result = generate_image(selected_model, input_text)
+     return result
+
+
+ def image_to_image(input_text, input_image, selected_model, selected_algorithm):
+     # Placeholder until the ControlNet pipeline is wired in.
+     return "path_to_image_to_image_result.jpg"
+
+
+ def image_to_video(input_text, input_image, selected_model, selected_algorithm):
+     # Placeholder until the video pipeline is wired in.
+     return "path_to_image_to_video_result.jpg"
+
+
+ def main():
+     st.title("Image Processing Options")
+
+     processing_option = st.selectbox("Select Processing Option", ["Text to Image", "Image to Image", "Image to Video"])
+
+     if processing_option == "Text to Image":
+         input_text = st.text_area("Enter Text for Image")
+         selected_model = st.selectbox("Select Model", text_to_image_models)
+         if st.button("Generate Image"):
+             result_image_path = text_to_image(input_text, selected_model)
+             st.image(result_image_path, caption="Result Image", use_column_width=True)
+
+     elif processing_option in ("Image to Image", "Image to Video"):
+         input_text = st.text_area("Enter Text for Image")
+         input_image = st.file_uploader("Upload Input Image", type=["jpg", "png"])
+         selected_algorithm = st.selectbox("Select Algorithm", ["Algorithm 1", "Algorithm 2"])
+
+         if processing_option == "Image to Image":
+             selected_model = st.selectbox("Select Model", controlnet_models)
+         elif processing_option == "Image to Video":
+             selected_model = st.selectbox("Select Model", image_to_video_models)
+
+         if st.button("Process"):
+             if input_image is not None:
+                 if processing_option == "Image to Image":
+                     result_image_path = image_to_image(input_text, input_image, selected_model, selected_algorithm)
+                 elif processing_option == "Image to Video":
+                     result_image_path = image_to_video(input_text, input_image, selected_model, selected_algorithm)
+                 st.image(result_image_path, caption="Result Image", use_column_width=True)
+             else:
+                 st.warning("Please upload an input image.")
+
+
+ if __name__ == "__main__":
+     main()
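As wired up here, only the Text to Image branch reaches a real pipeline; image_to_image and image_to_video still return hard-coded placeholder paths, so the st.image call in those branches will fail until real outputs are produced. Once the dependencies are installed, the UI can be exercised locally with streamlit run app.py.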
choices.py ADDED
@@ -0,0 +1,5 @@
+ text_to_image_models = ['stabilityai/stable-diffusion-xl-base-1.0']
+
+ controlnet_models = ['']  # placeholder: model IDs still to be filled in
+
+ image_to_video_models = ['']  # placeholder: model IDs still to be filled in
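The empty strings in controlnet_models and image_to_video_models are placeholders left to be filled in. For illustration only (this name is not part of the commit): since functions.py pairs its pipeline with the SD 1.5 ControlNet checkpoint lllyasviel/control_v11p_sd15_inpaint, a compatible controlnet_models entry would be an SD 1.5 base such as runwayml/stable-diffusion-v1-5.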
functions.py ADDED
@@ -0,0 +1,55 @@
+ from diffusers import StableDiffusionPipeline, StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
+ from diffusers.utils import load_image
+ import torch
+
+
+ def generate_image(model_name, input_text):
+     pipe = StableDiffusionPipeline.from_pretrained(model_name, torch_dtype=torch.float16)
+     # pipe = pipe.to("cuda")
+
+     prompt = input_text
+     image = pipe(prompt).images[0]
+
+     image.save("testo.png")
+     return image
+
+
+ def generate_controlnet_image(model_name, algorithm, input_image, input_text):
+     mask_image = generate_mask(input_image, algorithm)
+
+     base_model_path = model_name
+     controlnet_path = "lllyasviel/control_v11p_sd15_inpaint"
+
+     controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
+     pipe = StableDiffusionControlNetPipeline.from_pretrained(
+         base_model_path, controlnet=controlnet, torch_dtype=torch.float16
+     )
+
+     # speed up the diffusion process with a faster scheduler and memory optimizations
+     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+     # remove the following line if xformers is not installed or when using Torch 2.0
+     pipe.enable_xformers_memory_efficient_attention()
+     # memory optimization
+     pipe.enable_model_cpu_offload()
+
+     control_image = load_image(mask_image)
+     prompt = input_text
+
+     # generate the image
+     generator = torch.manual_seed(0)
+     image = pipe(
+         prompt, num_inference_steps=20, generator=generator, image=control_image
+     ).images[0]
+     image.save("./output.png")
+
+     return image
+
+
+ def generate_video(model_name, input_image, input_text):
+     # Placeholder: video generation is not implemented yet.
+     return input_image
+
+
+ def generate_mask(image, algorithm):
+     # Stub: mask generation is not implemented yet.
+     pass
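generate_mask is still a stub, so generate_controlnet_image would currently hand None to load_image and fail. Below is a minimal sketch of one possible implementation, assuming the two UI algorithm options map to simple OpenCV segmentation methods and that the input arrives as a file path; every choice here (Otsu thresholding, Canny plus dilation, the mask.png output path) is an assumption for illustration, not part of this commit.

import cv2
import numpy as np

def generate_mask(image_path, algorithm):
    # Sketch only: load the input as grayscale for thresholding / edge detection.
    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    if algorithm == "Algorithm 1":
        # Otsu's global threshold: bright regions become the editable mask.
        _, mask = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    else:
        # Canny edges dilated into a rough region mask.
        edges = cv2.Canny(img, 100, 200)
        mask = cv2.dilate(edges, np.ones((15, 15), np.uint8))
    out_path = "mask.png"
    cv2.imwrite(out_path, mask)
    return out_path  # diffusers' load_image accepts a local file path

Returning a path keeps the existing load_image(mask_image) call working unchanged.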
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ streamlit
+ diffusers
+ torch
+ transformers
+ opencv-python