iohanngrig committed
Commit 229f975
1 Parent(s): 198a971

Create app.py

Files changed (1)
  1. app.py +71 -0
app.py ADDED
@@ -0,0 +1,71 @@
+ import os
+ import openai
+ import io
+ import warnings
+ from PIL import Image
+ from stability_sdk import client
+ import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
+ import streamlit as st
+
+ STABILITY_KEY = st.secrets["STABILITY_KEY"]
+ OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]
+ MODEL = st.secrets["MODEL"]
+ MODEL2 = st.secrets["MODEL2"]
+
+ openai.api_key = OPENAI_API_KEY
+
+ st.title('Image Generator App')
+
+ # Initialize session state if it doesn't exist
+ if 'prompts' not in st.session_state:
+     st.session_state['prompts'] = []
+ if 'selected_prompt' not in st.session_state:
+     st.session_state['selected_prompt'] = ""
+ if 'edited_prompt' not in st.session_state:
+     st.session_state['edited_prompt'] = ""
+
+ st.session_state['edited_prompt'] = st.text_input(r'Input Prompt:', value=st.session_state['selected_prompt'])
+
+ def generateImageViaStabilityai(prompt):
+     os.environ['STABILITY_HOST'] = 'grpc.stability.ai:443'
+     stability_api = client.StabilityInference(
+         key=STABILITY_KEY,  # API key reference.
+         verbose=True,  # Print debug messages.
+         engine="stable-diffusion-xl-1024-v1-0",
+     )
+
+     # Set up our initial generation parameters.
+     answers = stability_api.generate(
+         prompt=prompt,
+         seed=4253978046,  # If a seed is provided, the resulting generated image will be deterministic.
+         # As long as all generation parameters remain the same, you can always recall the same image by generating it again.
+         # Note: this isn't quite the case for CLIP-guided generations.
+         steps=50,  # Number of inference steps performed on image generation. Defaults to 30.
+         cfg_scale=8.0,  # Influences how strongly the generation is guided to match the prompt.
+         # Setting this value higher increases the strength with which it tries to match the prompt.
+         # Defaults to 7.0 if not specified.
+         width=1024,  # Generation width, defaults to 512 if not included.
+         height=1024,  # Generation height, defaults to 512 if not included.
+         style_preset="photographic",
+         samples=5,  # Number of images to generate, defaults to 1 if not included.
+         sampler=generation.SAMPLER_K_DPMPP_2M  # Sampler used to denoise the generation.
+         # Defaults to k_dpmpp_2m if not specified. CLIP guidance only supports ancestral samplers.
+         # (Available samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m, k_dpmpp_sde)
+     )
+
+     # Warn on the console if the adult content classifier is tripped.
+     for resp in answers:
+         for artifact in resp.artifacts:
+             if artifact.finish_reason == generation.FILTER:
+                 warnings.warn(
+                     "Your request activated the API's safety filters and could not be processed. "
+                     "Please modify the prompt and try again.")
+             if artifact.type == generation.ARTIFACT_IMAGE:
+                 img = Image.open(io.BytesIO(artifact.binary))
+                 # img.save(str(artifact.seed) + ".png")  # Save generated images with their seed number as the filename.
+                 st.image(img, caption=f'Seed {artifact.seed}', use_column_width=True)
+
+ # Button to generate the image
+ if st.button(r'generate image'):
+     generateImageViaStabilityai(prompt=st.session_state['edited_prompt'])
+     st.session_state['prompt_generated'] = False
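
Note: app.py reads four values from st.secrets (STABILITY_KEY, OPENAI_API_KEY, MODEL, MODEL2), so running it outside Hugging Face Spaces requires a Streamlit secrets file. A minimal sketch of .streamlit/secrets.toml, with placeholder values standing in for the real keys and model names (the actual values are not part of this commit, and MODEL/MODEL2 are read but not yet used here):

# .streamlit/secrets.toml -- placeholder values, not real credentials
STABILITY_KEY = "sk-..."        # Stability AI API key
OPENAI_API_KEY = "sk-..."       # OpenAI API key
MODEL = "model-name"            # model identifier read into MODEL (unused in this commit)
MODEL2 = "second-model-name"    # model identifier read into MODEL2 (unused in this commit)

With the secrets in place, the app can be started locally with "streamlit run app.py".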