Mawube committed · Commit 30c5114 · 0 parent(s)
Initial commit

Files changed:
- .gitattributes +2 -0
- .gitignore +160 -0
- Dockerfile +36 -0
- README.md +24 -0
- main.py +87 -0
- models/__init__.py +0 -0
- models/ghostnet_model_float32.tflite +3 -0
- models/interpreter_state.json +3 -0
- requirements.txt +10 -0
- utils/__init__.py +0 -0
- utils/audio_generation.py +0 -0
- utils/image_preclassification.py +55 -0
- utils/palmoil_classification.py +53 -0
- utils/preprocessing.py +43 -0
.gitattributes
ADDED
@@ -0,0 +1,2 @@
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.json filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,160 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+.vscode/
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
Dockerfile
ADDED
@@ -0,0 +1,36 @@
+# Use the slim Debian-based Python image.
+FROM python:3.11-slim-buster
+
+# Allow statements and log messages to immediately appear in the logs
+ENV PYTHONUNBUFFERED True
+
+# Copy local code to the container image.
+WORKDIR /code
+COPY . /code/
+
+# Clear the pip cache
+RUN rm -rf /root/.cache/pip
+
+# Install system dependencies (OpenCV and Python build tooling)
+RUN apt-get update && \
+    apt-get install -y python3-pip python3-dev python3-opencv && \
+    rm -rf /var/lib/apt/lists/*
+
+# Add a non-root user with write access to its home directory
+RUN useradd -m -u 1000 user
+USER user
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+WORKDIR $HOME/app
+
+COPY --chown=user . $HOME/app
+
+# Install production dependencies.
+RUN pip install --no-cache-dir -r /code/requirements.txt
+
+# Expose the app port
+EXPOSE 7860
+
+# Run the FastAPI application using the Uvicorn server.
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
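For local testing, the image can be built and run with commands along these lines; the `afro-palm` tag is an arbitrary example:

```
docker build -t afro-palm .
docker run -p 7860:7860 afro-palm
```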
README.md
ADDED
@@ -0,0 +1,24 @@
+---
+title: Afro-Palm
+emoji: ⚡
+colorFrom: pink
+colorTo: blue
+sdk: docker
+pinned: true
+license: mit
+---
+# Backend
+This repository serves as a central hub for all backend code related to our project. It includes server-side code, database management scripts, APIs, and other files that support the application's core functionality.
+
+# Getting Started
+To get started, install the packages from requirements.txt:
+
+```pip install -r requirements.txt```
+
+Then `cd` into the main directory in your terminal and run the following to start the server:
+```uvicorn main:app --reload```
+
+
+# Documentation
+To access the documentation:
+append `/docs` to the base URL (e.g. `http://127.0.0.1:8000/docs`)
main.py
ADDED
@@ -0,0 +1,87 @@
+from fastapi import FastAPI, HTTPException
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import JSONResponse
+from utils.palmoil_classification import AfroPalmModel
+from utils.image_preclassification import pre_classification
+from utils.audio_generation import AudioGeneration
+import logging
+import uvicorn
+import base64
+from io import BytesIO
+from pydantic import BaseModel
+
+
+# Logging
+logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
+
+
+description = """
+## Welcome to the Red Palm Oil Adulteration Detection Backend API
+"""
+
+class ImageRequest(BaseModel):
+    image: str  # base64-encoded image data
+    imageURL: str  # public URL of the same image, used for pre-classification
+    lang: int  # target language id for audio generation
+
+# Initialize FastAPI
+app = FastAPI(title="Afro Red Palm Oil Project", description=description)
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=['*'],
+    allow_methods=['*'],
+    allow_headers=['*'],
+)
+
+"""
+API Routes
+"""
+
+# Home route
+@app.get('/')
+async def home():
+    return {"message": description}
+
+# Red palm oil classification endpoint
+@app.post("/predict")
+async def predict(image_request: ImageRequest):
+    logging.info("Loading image")
+
+    # Decode the base64 image string
+    decoded_image = base64.b64decode(image_request.image)
+
+    # Wrap the image data in a BytesIO object
+    image_bytes = BytesIO(decoded_image)
+
+    try:
+        # Pre-classify the image
+        is_palm_oil = pre_classification(image_request.imageURL)
+        logging.info("Pre-classification successful")
+
+        if is_palm_oil:
+            model = AfroPalmModel()
+            prediction, confidence = model.predict(image_bytes)
+            logging.debug(f"Prediction: {prediction}, Confidence: {confidence*100:.2f}%")
+
+            # Generate audio
+            audio_generation = AudioGeneration(prediction=prediction, confidence=confidence*100, language=image_request.lang)
+            translated_text = audio_generation.ghanaian_language_translator()
+        else:
+            logging.info("Image is not red palm oil")
+            return JSONResponse(status_code=418, content={"status": "error",
+                                                          "error": "Image is not red palm oil"})
+
+        return {
+            "status": "success",
+            "result": prediction,
+            "confidence": f"{confidence*100:.2f}"
+        }
+
+    except Exception as e:
+        logging.error(e)
+        raise HTTPException(status_code=500, detail={"status": "error", "error": str(e)})
+
+
+if __name__ == '__main__':
+    uvicorn.run(app, host="0.0.0.0", port=8000)
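For reference, a minimal client sketch for the `/predict` endpoint. The file name `sample.jpg`, the image URL, and the `lang` value are placeholders, and the server is assumed to be running locally on port 8000:

```python
import base64
import requests

# Read a local image and base64-encode it (sample.jpg is a placeholder)
with open("sample.jpg", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "image": encoded,                              # base64 payload classified by AfroPalmModel
    "imageURL": "https://example.com/sample.jpg",  # public URL consumed by pre_classification
    "lang": 0,                                     # language id passed to AudioGeneration
}

response = requests.post("http://127.0.0.1:8000/predict", json=payload)
print(response.status_code, response.json())
```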
models/__init__.py
ADDED
File without changes
models/ghostnet_model_float32.tflite
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90677f4651e27591be195d9ee9fc5d2f7e2699e6ee946374e4f46f7c127a8fe1
+size 15635308
models/interpreter_state.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:444bab4531acce84c03477702eaf8cd3fd52c3ca9f901f83ffca1fb2ad3f6423
+size 62234857
requirements.txt
ADDED
@@ -0,0 +1,10 @@
+fastapi
+uvicorn
+gunicorn
+python-multipart
+numpy
+torch
+torchvision
+opencv-python
+tensorflow
+roboflow
utils/__init__.py
ADDED
File without changes
utils/audio_generation.py
ADDED
File without changes
utils/image_preclassification.py
ADDED
@@ -0,0 +1,55 @@
+from roboflow import Roboflow
+import logging
+import requests
+from urllib.parse import urlparse
+import os
+import uuid
+
+
+# Read the Roboflow API key from the environment instead of hard-coding a secret
+rf = Roboflow(api_key=os.environ.get("ROBOFLOW_API_KEY"))
+project = rf.workspace("afropalm").project("afropalm-pre-classification")
+model = project.version(1).model
+
+
+def pre_classification(imageURL):
+    logging.info("Checking if image is red palm oil")
+
+    response = requests.get(imageURL)
+
+    if response.status_code == 200:
+        # Parse the URL to get the file name and extension
+        parsed_url = urlparse(imageURL)
+        file_name = parsed_url.path.split("/")[-1]
+        file_extension = file_name.split(".")[-1]
+
+        # Save the downloaded image to a file with a unique, dynamic name
+        filename = f"{uuid.uuid4()}.{file_extension}"
+        with open(filename, 'wb') as image_file:
+            image_file.write(response.content)
+
+        # Use the absolute file path of the downloaded image
+        image_path = os.path.abspath(filename)
+
+        results = model.predict(image_path, confidence=1, overlap=30).json()
+
+        # Remove the temporary file once the model has consumed it
+        if os.path.isfile(image_path):
+            os.remove(image_path)
+
+        class_name = None
+        confidence = 0
+
+        if results['predictions']:
+            # Loop through the results and keep the highest-confidence class
+            for result in results["predictions"]:
+                if result["confidence"] > confidence:
+                    class_name = result["class"]
+                    confidence = result["confidence"]
+
+            return class_name == "palmoil"
+        else:
+            return False
+    else:
+        logging.error("Error loading the image for pre-classification")
+        raise Exception("Error loading the image")
utils/palmoil_classification.py
ADDED
@@ -0,0 +1,53 @@
+from utils.preprocessing import preprocess_image
+import tensorflow as tf
+import os
+import logging
+import json
+
+
+class AfroPalmModel:
+    """
+    Class to load the model and make predictions
+    """
+    def __init__(self):
+        logging.info("Loading classification model")
+
+        # interpreter_state.json lives under models/ relative to the working directory
+        self.interpreter_path = os.path.join(os.getcwd(), "models", "interpreter_state.json")
+
+        logging.debug(f"Preparing to read from {self.interpreter_path}")
+
+        with open(self.interpreter_path, 'r') as json_file:
+            interpreter_state = json.load(json_file)
+
+        logging.info("File read successfully")
+
+        # Create and allocate the interpreter using the loaded state
+        self.interpreter = tf.lite.Interpreter(model_content=interpreter_state["tflite_model"].encode('latin1'))
+        self.interpreter.allocate_tensors()
+
+        # Retrieve the input and output tensor indices
+        self.input_index = interpreter_state["input_index"]
+        self.output_index = interpreter_state["output_index"]
+
+        logging.info("Model loaded successfully")
+
+    def predict(self, image):
+        """
+        Make a prediction on the image
+        :param image: image to make a prediction on
+        :return: predicted class index and confidence score
+        """
+        logging.info("Making prediction")
+
+        img = preprocess_image(image)
+
+        self.interpreter.set_tensor(self.input_index, img)
+        self.interpreter.invoke()
+
+        # The output is a list of per-class scores; return the argmax and its score
+        predictions = list(self.interpreter.get_tensor(self.output_index)[0])
+
+        logging.info("Classification successful")
+
+        return predictions.index(max(predictions)), max(predictions)
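`interpreter_state.json` is expected to hold the raw TFLite model bytes (latin1-decoded so they survive JSON) plus the input and output tensor indices. Below is a sketch of how such a file could be produced from the bundled model; the actual export script is not part of this commit, so treat this as an assumption:

```python
import json
import tensorflow as tf

# Load the converted TFLite model bytes
with open("models/ghostnet_model_float32.tflite", "rb") as f:
    tflite_model = f.read()

# Allocate an interpreter once to discover the tensor indices
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()

state = {
    # latin1 maps bytes 0-255 one-to-one to code points, so the model
    # round-trips through json.dump / json.load and .encode('latin1')
    "tflite_model": tflite_model.decode("latin1"),
    "input_index": interpreter.get_input_details()[0]["index"],
    "output_index": interpreter.get_output_details()[0]["index"],
}

with open("models/interpreter_state.json", "w") as json_file:
    json.dump(state, json_file)
```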
utils/preprocessing.py
ADDED
@@ -0,0 +1,43 @@
+import numpy as np
+from torchvision.transforms import functional as TF
+from torchvision.transforms import transforms
+from PIL import Image
+import cv2
+
+
+def to_hsv(image):
+    """Convert a PIL image to the HSV color space"""
+    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2HSV)
+    return TF.to_pil_image(image)
+
+
+# Define the preprocessing function for a single image
+def preprocess_image(image):
+    # Load the image
+    img = Image.open(image)
+
+    # Ensure the image is in RGB format
+    if isinstance(img, Image.Image):
+        img = img.convert("RGB")
+    elif isinstance(img, (np.ndarray, np.generic)):
+        if img.shape[-1] == 1:  # Grayscale image
+            img = np.stack([img, img, img], axis=-1)
+        elif img.shape[-1] != 3:  # Not RGB or grayscale
+            raise ValueError("Input image must be in RGB or Grayscale format")
+    else:
+        raise ValueError("Unsupported image type")
+
+    # Define the transformations
+    transform = transforms.Compose([
+        transforms.Resize((224, 224)),
+        transforms.ToTensor(),
+    ])
+
+    # Apply the transformations to the image
+    preprocessed_img = transform(img)
+
+    # Add batch dimension and reorder dimensions to [1, 224, 224, 3]
+    preprocessed_img = preprocessed_img.unsqueeze(0).permute(0, 2, 3, 1)
+
+    # Convert to a NumPy array, which the TFLite interpreter's set_tensor expects
+    return preprocessed_img.numpy()
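As a quick sanity check of the output format (assuming a local `sample.jpg` as a placeholder input):

```python
from utils.preprocessing import preprocess_image

batch = preprocess_image("sample.jpg")  # sample.jpg is a placeholder
print(batch.shape)  # (1, 224, 224, 3), float32, values in [0, 1]
```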