Phauglin committed on
Commit
17b2d57
·
verified ·
1 Parent(s): 7bf7d7e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -15
app.py CHANGED
@@ -4,6 +4,8 @@ import random
4
  from fastai.vision.all import *
5
  from openai import OpenAI
6
  from pathlib import Path
 
 
7
 
8
  # Dictionary of plant names and their Wikipedia links
9
  search_terms_wikipedia = {
@@ -122,7 +124,8 @@ def process_image(img, generate_image=True):
122
  )
123
 
124
  image_base64 = result.data[0].b64_json
125
- generated_image = base64.b64decode(image_base64)
 
126
  else:
127
  generated_image = None
128
 
@@ -148,20 +151,39 @@ def clear_outputs():
148
  learn = load_learner('resnet50_30_categories.pkl')
149
 
150
  # Create the web interface
151
- demo = gr.Interface(
152
- fn=process_image,
153
- inputs=gr.Image(height=230, width=230, label="Upload Image for Classification", type="pil"),
154
- outputs=[
155
- gr.Label(label="Classification Results"),
156
- gr.Image(label="AI Generated Interpretation"),
157
- gr.Textbox(label="Wikipedia Article Link", lines=1),
158
- gr.Textbox(label="Endangerment Status", lines=1)
159
- ],
160
- examples=example_images,
161
- examples_per_page=6,
162
- title="California Native Flower Classifier",
163
- description="Upload an image of a California native flower to classify it and see an AI-generated interpretation."
164
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
 
166
  # Start the application
167
  demo.launch(inline=False)
 
4
  from fastai.vision.all import *
5
  from openai import OpenAI
6
  from pathlib import Path
7
+ from PIL import Image
8
+ import io
9
 
10
  # Dictionary of plant names and their Wikipedia links
11
  search_terms_wikipedia = {
 
124
  )
125
 
126
  image_base64 = result.data[0].b64_json
127
+ image_bytes = base64.b64decode(image_base64)
128
+ generated_image = Image.open(io.BytesIO(image_bytes))
129
  else:
130
  generated_image = None
131
 
 
151
  learn = load_learner('resnet50_30_categories.pkl')
152
 
153
  # Create the web interface
154
+ with gr.Blocks() as demo:
155
+ # Input section
156
+ with gr.Row():
157
+ input_image = gr.Image(height=230, width=230, label="Upload Image for Classification", type="pil")
158
+
159
+ # Output section
160
+ with gr.Row():
161
+ with gr.Column():
162
+ label_output = gr.Label(label="Classification Results")
163
+ wiki_output = gr.Textbox(label="Wikipedia Article Link", lines=1)
164
+ endangerment_output = gr.Textbox(label="Endangerment Status", lines=1) # ← NEW
165
+ generated_image = gr.Image(label="AI Generated Interpretation")
166
+
167
+ # Add example images using local paths
168
+ gr.Examples(
169
+ examples=example_images,
170
+ inputs=input_image,
171
+ examples_per_page=6,
172
+ fn=None,
173
+ outputs=None
174
+ )
175
+
176
+ input_image.change(
177
+ fn=lambda img: process_image(img, generate_image=True),
178
+ inputs=input_image,
179
+ outputs=[label_output, generated_image, wiki_output, endangerment_output]
180
+ )
181
+
182
+ input_image.clear(
183
+ fn=clear_outputs,
184
+ inputs=[],
185
+ outputs=[label_output, generated_image, wiki_output, endangerment_output]
186
+ )
187
 
188
  # Start the application
189
  demo.launch(inline=False)