File size: 2,833 Bytes
a7e4302
 
 
58fb10e
a7e4302
58fb10e
a7e4302
 
 
 
 
 
 
 
 
 
 
 
58fb10e
 
 
 
a7e4302
af3bb2f
 
 
 
 
 
a7e4302
58fb10e
a7e4302
 
 
58fb10e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a7e4302
 
 
 
58fb10e
a7e4302
43be5e0
 
 
 
 
af3bb2f
 
 
 
58fb10e
 
 
 
 
 
 
af3bb2f
58fb10e
a7e4302
 
 
 
58fb10e
a7e4302
 
 
 
 
 
58fb10e
a7e4302
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
import tensorflow as tf
import numpy as np
import google.generativeai as genai
import os

app = FastAPI()

# Add CORS middleware
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# disallowed by the CORS spec (browsers reject wildcard origins on
# credentialed requests) — if credentialed requests are actually needed,
# list explicit origins here; otherwise drop allow_credentials. Confirm
# against the frontend's requirements.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Configure Gemini API.
# SECURITY FIX: the previous code shipped a real API key as the os.getenv()
# fallback. A secret committed to source control must be considered leaked
# and rotated; never hard-code it. We now require the environment variable
# and fail fast at startup with an actionable message instead of silently
# running with a compromised key.
GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')
if not GEMINI_API_KEY:
    raise RuntimeError(
        "GEMINI_API_KEY environment variable is not set; "
        "export it before starting the server."
    )
genai.configure(api_key=GEMINI_API_KEY)
gemini_model = genai.GenerativeModel('gemini-pro')

# Load model with specific version handling.
# Loaded once at import time from the working directory; the process will
# fail to start if 'Image_classify.keras' is absent.
model = tf.keras.models.load_model(
    'Image_classify.keras',
    custom_objects=None,
    compile=False  # Don't compile the model on load; inference-only, so no optimizer/loss needed
)

# Define categories and image dimensions.
# Index order must match the output layer of the trained model — the
# /predict endpoint maps argmax(score) straight into this list.
data_cat = ['disposable cups', 'paper', 'plastic bottle']
# assumes the model was trained on 224x224 RGB inputs — TODO confirm
img_height, img_width = 224, 224

def generate_recycling_insight(detected_object):
    """Ask Gemini for three eco-friendly repurposing ideas for *detected_object*.

    Best-effort: any failure (network, API, blocked response) is converted
    into an "Error generating insight: ..." string rather than raised, so
    the caller's response pipeline never breaks on an LLM hiccup.
    """
    prompt = f"""
        You are a sustainability-focused AI. Analyze the {detected_object} (which is a solid dry waste) 
        and generate the top three innovative, eco-friendly recommendations for repurposing it. Ensure each recommendation is:
        - Give the Title of the recommendation
        - Practical and easy to implement
        - Environmentally beneficial
        - Clearly explained in one or two concise sentences
        """

    try:
        reply = gemini_model.generate_content(prompt)
        # .text can itself raise (e.g. safety-blocked responses), so the
        # whole access stays inside the try block.
        return reply.text.strip()
    except Exception as exc:
        return f"Error generating insight: {str(exc)}"

@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    """Classify an uploaded waste image and attach recycling suggestions.

    Returns a JSON-serializable dict containing either:
      * "class", "confidence" (percent), "insights" on a confident prediction;
      * "error" plus "confidence" when the model is too unsure (< 45%);
      * "error" alone when decoding or inference fails.
    """
    try:
        # Decode the raw upload into a single-image float32 batch, resized
        # to the dimensions the classifier expects.
        raw = await file.read()
        decoded = tf.image.decode_image(raw, channels=3)
        resized = tf.image.resize(decoded, [img_height, img_width])
        batch = tf.expand_dims(tf.cast(resized, tf.float32), 0)

        # Run inference and turn the first (only) row into probabilities.
        logits = model.predict(batch, verbose=0)
        probs = tf.nn.softmax(logits[0])
        confidence = float(np.max(probs) * 100)

        # Refuse to label images the model is not reasonably sure about.
        if confidence < 45:
            return {
                "error": "Confidence too low to make a prediction",
                "confidence": confidence,
            }

        label = data_cat[np.argmax(probs)]
        return {
            "class": label,
            "confidence": confidence,
            "insights": generate_recycling_insight(label),
        }

    except Exception as e:
        # Surface any failure as a JSON body instead of an unhandled 500.
        return {"error": str(e)}

if __name__ == "__main__":
    # Run the API directly (dev/standalone mode) on all interfaces.
    # Port 7860 is the conventional Hugging Face Spaces port.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)