# nutri-ai / app.py — Streamlit app (Hugging Face Space by Norphel, commit 8a05b4e)
import streamlit as st
from ultralytics import YOLO
from huggingface_hub import hf_hub_download
from PIL import Image
import io
import cv2
import numpy as np
# Load YOLOv8 model from Hugging Face.
# NOTE: this runs at import time — the weights are downloaded (or read from the
# local HF cache) before the UI renders, so first startup needs network access.
repo_id = "Norphel/nutri-ai-n2" # Replace with your Hugging Face model repo
model_file = "best.pt"
model_path = hf_hub_download(repo_id=repo_id, filename=model_file)
model = YOLO(model_path)
# Food nutrition data keyed by the model's class names (must match model.names
# values so lookups in the UI succeed).
# NOTE(review): serving size / per-100g basis is not specified anywhere in this
# file — confirm against the data source before trusting the numbers.
foods = {
    "kewadatsi": {
        "calories": 400,
        "protein": "10g",
        "fat": "0.4g",
        "carbohydrates": "92g",
        "sodium": "20mg"
    },
    "emadatsi": {
        "calories": 275,
        "protein": "10g",
        "fat": "0.4g",
        "carbohydrates": "92g",
        "sodium": "850mg"
    },
    "rice": {
        "calories": 130,
        "protein": "2.7g",
        "fat": "0.3g",
        "carbohydrates": "28g",
        "sodium": "1mg"
    }
}
# Function to calculate bounding box area
def calculate_bounding_box_area(results):
    """Extract detected class names and bounding-box pixel areas from YOLO results.

    Parameters
    ----------
    results : list
        Ultralytics YOLO prediction results; only ``results[0]`` is read.

    Returns
    -------
    tuple[list[str], list[int]]
        Two parallel lists: the class name and the pixel area of each
        detection, in the same order.
    """
    class_names = []
    areas = []
    for box in results[0].boxes:
        # Axis-aligned corners, cast to int so the area is in whole pixels.
        x1, y1, x2, y2 = map(int, box.xyxy[0].tolist())
        area = (x2 - x1) * (y2 - y1)
        # box.cls normally holds one class id per box, but append one area per
        # id so the two returned lists always stay the same length (the
        # original appended the area only once per box, which could desync
        # them for multi-id boxes and break the caller's parallel indexing).
        for class_id in box.cls.tolist():
            class_names.append(model.names[int(class_id)])
            areas.append(area)
    return class_names, areas
# Function to process the image (resize & convert)
def process_image(uploaded_file):
if uploaded_file is not None:
image = Image.open(uploaded_file)
image = image.resize((640, 640)) # Resize to 640x640
return image
return None
# Function to run YOLO inference
def run_yolo(image):
results = model(image) # Run YOLOv8 detection with PIL image directly
detected_classes, bounding_areas = calculate_bounding_box_area(results)
# Draw bounding boxes on the image and return the processed image
result_img = results[0].plot() # Draw bounding boxes on the image
result_pil = Image.fromarray(cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB)) # Convert back to PIL
print(detected_classes, bounding_areas)
return result_pil, detected_classes, bounding_areas
# --- Streamlit UI ---
# Fixes: the emoji literals below were mojibake (UTF-8 emoji bytes misdecoded
# as a Thai codepage, e.g. "ยฒ" for "²"); restored the intended characters.
st.title("Nutri-AI")
uploaded_file = st.file_uploader("Upload an image (PNG, JPG, JPEG)", type=["png", "jpg", "jpeg"])
if uploaded_file:
    resized_image = process_image(uploaded_file)
    if resized_image:
        st.image(resized_image, caption="Resized Image (640x640)", use_container_width=True)
        st.write("🔍 Running YOLOv8 detection...")
        detected_image, detected_classes, bounding_areas = run_yolo(resized_image)
        # Display the annotated image with all detections drawn on it.
        st.image(detected_image, caption="Detected Objects", use_container_width=True)
        # One section per detection: class name, box area, and nutrition
        # facts when the class is in the `foods` table. zip() keeps the two
        # lists paired safely even if their lengths ever differ.
        for class_name, area in zip(detected_classes, bounding_areas):
            st.subheader(f"🍲 Detected: {class_name.capitalize()}")
            st.write(f"🟡 **Bounding Box Area:** {area:,.2f} pixels²")
            if class_name in foods:
                st.write("📊 **Nutritional Information:**")
                for key, value in foods[class_name].items():
                    st.write(f"🔹 {key.capitalize()}: {value}")