codewithdark committed
Commit 4cac5ca (verified) · 1 Parent(s): c35810d

Update app.py

Files changed (1): app.py (+30 -21)
app.py CHANGED
@@ -35,20 +35,23 @@ except Exception as e:
 def generate_image_from_model(prompt):
     try:
         response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
+        response.raise_for_status()  # Raise an error for bad responses
         image_bytes = response.content
+        if not image_bytes:
+            raise ValueError("Empty image content received from the API")
         image = Image.open(io.BytesIO(image_bytes))
         return image
     except Exception as e:
-        st.error(f"Error occurs:{e}")
+        st.error(f"Error generating image from model: {e}")
 
+        return None
+
 def generate_image(prompt):
-    try:
-        response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
-        image_bytes = response.content
-        image = Image.open(io.BytesIO(image_bytes))
-        return image
-    except Exception as e:
-        st.error(f"Error occurs:{e}")
+    response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
+    image_bytes = response.content
+    image = Image.open(io.BytesIO(image_bytes))
+    return image
+
 # Streamlit app
 def main():
     try:
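The reworked generate_image_from_model now fails soft: raise_for_status() turns 4xx/5xx responses into exceptions, the empty-body check catches a 200 response with no payload, and the function returns None instead of letting the exception escape into Streamlit (the companion generate_image helper, by contrast, now lets exceptions propagate). A minimal standalone sketch of the same pattern; the URL, token, fetch_image name, and the print call (the app uses st.error) are illustrative placeholders, not values from this repository:

import io

import requests
from PIL import Image

API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
headers = {"Authorization": "Bearer <HF_TOKEN>"}  # placeholder token

def fetch_image(prompt):
    try:
        response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
        response.raise_for_status()    # 4xx/5xx responses raise requests.HTTPError
        if not response.content:       # a 200 with an empty body is still a failure
            raise ValueError("Empty image content received from the API")
        return Image.open(io.BytesIO(response.content))
    except Exception as e:
        print(f"Error generating image from model: {e}")
        return None                    # callers can test for None instead of crashing

# img = fetch_image("an astronaut riding a horse")  # returns a PIL.Image or None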
@@ -123,15 +126,18 @@ def main():
                 st.session_state.chat_history.append({"role": "bot", "content": generated_image})
 
                 # Display the generated image
-                for index, chat in enumerate(st.session_state.chat_history):
-                    with st.chat_message(chat["role"]):
-                        if chat["role"] == "user":
-                            st.markdown(user_input)
-                        elif chat["role"] == "bot":
-                            st.image(generated_image, width=400)
+                if generated_image is not None:
+                    for index, chat in enumerate(st.session_state.chat_history):
+                        with st.chat_message(chat["role"]):
+                            if chat["role"] == "user":
+                                st.markdown(user_input)
+                            elif chat["role"] == "bot":
+                                st.image(generated_image, width=400)
+                else:
+                    st.error("Failed to generate image. Check logs for details.")
 
             else:
-                GOOGLE_API_KEY = "your_Gemini_Api_key"
+                GOOGLE_API_KEY = "your_gemini_Api_key"
                 genai.configure(api_key=GOOGLE_API_KEY)
                 model = genai.GenerativeModel('gemini-1.0-pro')
                 prompt = user_input
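The else branch above configures Gemini with a hard-coded placeholder key (the commit only changes the placeholder's casing). A minimal sketch of that branch as a helper; reading the key from an environment variable and the generate_content call are assumptions of this sketch, not lines shown in the diff:

import os

import google.generativeai as genai

def gemini_reply(prompt: str) -> str:
    # The commit keeps GOOGLE_API_KEY as a placeholder string; an environment
    # variable is one way to avoid committing a real key.
    genai.configure(api_key=os.getenv("GOOGLE_API_KEY", "your_gemini_Api_key"))
    model = genai.GenerativeModel('gemini-1.0-pro')
    response = model.generate_content(prompt)
    return response.text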
@@ -160,12 +166,15 @@ def main():
             elif selected_model == "stabilityai/stable-diffusion-xl-base-1.0":
                 prompt = user_input
                 generated_image = generate_image_from_model(prompt)
-                for index, chat in enumerate(st.session_state.chat_history):
-                    with st.chat_message(chat["role"]):
-                        if chat["role"] == "user":
-                            st.markdown(user_input)
-                        elif chat["role"] == "bot":
-                            st.image(generated_image, width=400)
+                if generated_image is not None:
+                    for index, chat in enumerate(st.session_state.chat_history):
+                        with st.chat_message(chat["role"]):
+                            if chat["role"] == "user":
+                                st.markdown(user_input)
+                            elif chat["role"] == "bot":
+                                st.image(generated_image, width=400)
+                else:
+                    st.error("Failed to generate image. Check logs for details.")
 
             else:
                 try:
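Both image branches now gate rendering on the helper actually returning an image. A standalone sketch of that guard; the show_generated_image wrapper is hypothetical (the app inlines this logic), and unlike the committed loop, which renders user_input and generated_image for every history entry, this sketch draws each entry's own stored content:

import streamlit as st

def show_generated_image(user_input, generated_image):
    # generated_image is whatever generate_image_from_model returned; it may now be None.
    st.session_state.setdefault("chat_history", [])
    if generated_image is not None:
        st.session_state.chat_history.append({"role": "bot", "content": generated_image})
        for chat in st.session_state.chat_history:
            with st.chat_message(chat["role"]):
                if chat["role"] == "user":
                    st.markdown(chat["content"])
                else:  # bot entries hold PIL images
                    st.image(chat["content"], width=400)
    else:
        st.error("Failed to generate image. Check logs for details.")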
 