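"""Nutri-AI: a Streamlit app that detects dishes in an uploaded photo with a
YOLOv8 model hosted on the Hugging Face Hub and displays basic nutrition facts
for each detected food item."""
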
import streamlit as st
from ultralytics import YOLO
from huggingface_hub import hf_hub_download
from PIL import Image
import io
import cv2
import numpy as np
# Load the YOLOv8 model from Hugging Face (cached across Streamlit reruns)
repo_id = "Norphel/nutri-ai-n2"  # Replace with your Hugging Face model repo
model_file = "best.pt"

@st.cache_resource
def load_model():
    model_path = hf_hub_download(repo_id=repo_id, filename=model_file)
    return YOLO(model_path)

model = load_model()
# Food nutrition data
foods = {
"kewadatsi": {
"calories": 400,
"protein": "10g",
"fat": "0.4g",
"carbohydrates": "92g",
"sodium": "20mg"
},
"emadatsi": {
"calories": 275,
"protein": "10g",
"fat": "0.4g",
"carbohydrates": "92g",
"sodium": "850mg"
},
"rice": {
"calories": 130,
"protein": "2.7g",
"fat": "0.3g",
"carbohydrates": "28g",
"sodium": "1mg"
}
}
# Function to calculate bounding box area
def calculate_bounding_box_area(results):
    areas = []
    class_names = []
    # Iterate through all bounding boxes
    for box in results[0].boxes:
        x1, y1, x2, y2 = map(int, box.xyxy[0].tolist())  # Bounding box coordinates
        area = (x2 - x1) * (y2 - y1)  # Calculate area in pixels
        areas.append(area)
        # Get the class ID(s) for the current bounding box
        class_ids = box.cls.tolist()  # This returns a list of class IDs
        print(class_ids)
        # Convert class ID(s) to class name(s)
        for class_id in class_ids:
            class_name = model.names[int(class_id)]  # Convert class ID to class name
            class_names.append(class_name)  # Add class name to list
    return class_names, areas
# Function to process the image (resize & convert)
def process_image(uploaded_file):
    if uploaded_file is not None:
        image = Image.open(uploaded_file)
        image = image.resize((640, 640))  # Resize to 640x640
        return image
    return None
# Function to run YOLO inference
def run_yolo(image):
    results = model(image)  # Run YOLOv8 detection with the PIL image directly
    detected_classes, bounding_areas = calculate_bounding_box_area(results)
    # Draw bounding boxes on the image and return the processed image
    result_img = results[0].plot()
    result_pil = Image.fromarray(cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB))  # Convert back to PIL
    print(detected_classes, bounding_areas)
    return result_pil, detected_classes, bounding_areas
# Streamlit UI
st.title("Nutri-AI")
uploaded_file = st.file_uploader("Upload an image (PNG, JPG, JPEG)", type=["png", "jpg", "jpeg"])
if uploaded_file:
    resized_image = process_image(uploaded_file)
    if resized_image:
        st.image(resized_image, caption="Resized Image (640x640)", use_container_width=True)
        st.write("🔍 Running YOLOv8 detection...")
        detected_image, detected_classes, bounding_areas = run_yolo(resized_image)
        print(detected_classes, bounding_areas)
        # Display detected image
        st.image(detected_image, caption="Detected Objects", use_container_width=True)
        # Show detected classes, bounding box areas, and food nutrition info
        for i, class_name in enumerate(detected_classes):
            st.subheader(f"🍲 Detected: {class_name.capitalize()}")
            st.write(f"🟡 **Bounding Box Area:** {bounding_areas[i]:,.2f} pixels²")
            if class_name in foods:
                st.write("📊 **Nutritional Information:**")
                for key, value in foods[class_name].items():
                    st.write(f"🔹 {key.capitalize()}: {value}")