# "Spaces: Running" — Hugging Face Spaces status-page text captured by the
# paste, not application source; kept here as a comment so the file parses.
from fastapi import FastAPI, HTTPException | |
from fastapi.middleware.cors import CORSMiddleware | |
from fastapi.responses import JSONResponse | |
from utils.palmoil_classification import AfroPalmModel | |
from utils.image_preclassification import pre_classification | |
from utils.audio_generation import AudioGeneration | |
import logging | |
import uvicorn | |
import base64 | |
import io | |
import numpy as np | |
from io import BytesIO | |
from pydantic import BaseModel | |
import os | |
# Configure the root logger: timestamped records at DEBUG verbosity.
logging.basicConfig(
    format='%(levelname)s: %(asctime)s - %(message)s',
    datefmt='%d-%b-%y %H:%M:%S',
    level=logging.DEBUG,
)

# Markdown rendered on the auto-generated OpenAPI docs page.
description = """
## Welcome to the Red Palm Oil Adulteration Detection Backend Api
"""
class ImageRequest(BaseModel):
    """Request payload for the classification endpoint.

    Validated by pydantic before the route handler runs.
    """

    # Publicly reachable URL of the image to classify.
    imageURL: str
# Build the ASGI application object served by uvicorn below.
app = FastAPI(
    title="Afro Red Palm Oil Project",
    description=description,
)

# Public demo API: let browsers on any origin call it with any
# method and any header.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

"""
API Routes
"""
# Home route
@app.get("/")
async def home():
    """Landing endpoint: return the API description text as JSON.

    NOTE(review): no route decorator was present in the pasted source, so
    this handler was never registered with FastAPI — restored here; confirm
    the intended path. The original ``return {description}`` built a
    one-element *set* (which FastAPI serializes as a list); a keyed JSON
    object is almost certainly what was intended.
    """
    return {"description": description}
# red palm oil classification endpoint
@app.post("/predict")
async def predict(image_request: ImageRequest):
    """Classify the red-palm-oil image referenced by ``image_request.imageURL``.

    Flow:
      1. ``pre_classification`` downloads the image to a local temp file and
         decides whether it depicts red palm oil at all.
      2. If it does, ``AfroPalmModel`` predicts adulteration and a confidence.

    Returns a ``{"status", "result", "confidence"}`` JSON object on success,
    a 418 JSONResponse when the image is not red palm oil, and raises a 500
    ``HTTPException`` on any internal failure.

    NOTE(review): no route decorator was present in the pasted source, so the
    endpoint was never registered — restored here; confirm the intended path.
    TODO: audio generation (``AudioGeneration``) was stubbed out in the
    original and is still pending.
    """
    image_path = None
    try:
        # Pre-classify: downloads the image and returns (is_palm_oil, local_path).
        is_palm_oil, image_path = pre_classification(image_request.imageURL)
        logging.info("Pre-classification successful")

        if not is_palm_oil:
            logging.info("Image is not a red palm oil")
            return JSONResponse(
                status_code=418,
                content={"status": "error",
                         'error': "Image is not a red palm oil"},
            )

        model = AfroPalmModel()
        prediction, confidence = model.predict(image_path)
        logging.debug(f"Prediction: {prediction}, Confidence: {confidence*100:.2f}%")
        return {
            "status": "success",
            "result": prediction,
            "confidence": f"{confidence*100:.2f}",
        }
    except Exception as e:
        logging.error(e)
        raise HTTPException(status_code=500, detail={"status": "error",
                                                     'error': str(e)})
    finally:
        # Always delete the downloaded temp image — the original removed it
        # only on the success path, so a failed prediction leaked the file.
        if image_path and os.path.isfile(image_path):
            os.remove(image_path)
if __name__ == '__main__':
    # Dev-server entry point. The original passed port="8000" (a str —
    # uvicorn requires an int) and a ``debug`` kwarg that ``uvicorn.run``
    # does not accept; both would abort startup. For auto-reload use
    # ``uvicorn main:app --reload`` from the CLI instead.
    uvicorn.run(app, host="0.0.0.0", port=8000)