# NOTE: scraped web-viewer artifacts (file size, commit hashes, line-number
# gutter) were converted to this comment so the module is valid Python.
from fastapi import FastAPI, File, UploadFile
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input, decode_predictions
from tensorflow.keras.preprocessing import image
import numpy as np
from io import BytesIO
from PIL import Image
from fastapi import FastAPI
from fastapi.responses import HTMLResponse
import os
# Single FastAPI application instance; route decorators below attach to it.
app = FastAPI()
# Silence less-important TensorFlow logs.
# NOTE(review): tensorflow is already imported at the top of this file, and
# TF_CPP_MIN_LOG_LEVEL is read at import time — setting it here likely has
# no effect. Move these env assignments above the tensorflow imports.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Disable oneDNN optimizations.
# NOTE(review): same ordering caveat as above — set before importing TF.
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
@app.get("/logs=container")
async def container_logs():
    """Stub endpoint: always reports that no container logs are available."""
    payload = {"status": "No logs available"}
    return payload
@app.get("/")
def greet_json():
    """Root endpoint returning a static hello-world JSON payload."""
    greeting = {"Hello": "World!"}
    return greeting
# BUGFIX: removed a duplicate `app = FastAPI()` here. Rebinding `app` to a
# fresh instance discarded the routes already registered above
# ("/logs=container" and "/"), so the served application would only have
# exposed "/predict". All endpoints now attach to the single instance
# created at the top of the file.

# Load MobileNetV2 pre-trained on ImageNet (downloads weights on first run).
model = MobileNetV2(weights="imagenet")
def prepare_image(img):
    """Convert a PIL image into a MobileNetV2-ready input tensor.

    Resizes to 224x224, converts to a numpy array, adds a leading batch
    axis (shape (1, 224, 224, 3)), and applies MobileNetV2 preprocessing.
    """
    resized = img.resize((224, 224))
    batch = np.expand_dims(image.img_to_array(resized), axis=0)
    return preprocess_input(batch)
@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    """Classify an uploaded image with MobileNetV2.

    Reads the uploaded file, decodes it as an RGB image, runs the model,
    and returns the top-3 ImageNet predictions as a list of
    {"label", "probability"} dicts.
    """
    raw = await file.read()
    pil_img = Image.open(BytesIO(raw)).convert("RGB")
    batch = prepare_image(pil_img)
    preds = model.predict(batch)
    top3 = decode_predictions(preds, top=3)[0]
    return [
        {"label": name, "probability": float(score)}
        for (_, name, score) in top3
    ]