Commit · 0efb498
Parent(s): f93586a
ImageClassificationSpace
API on a HuggingFace Space with a TensorFlow Serving-like pipeline.
Files changed:
- app.py (+21 -21)
- my_classification_model_float16.tflite (+1 -1)
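The predict endpoint in app.py below expects a JSON body with an image_base64 field (see the ImagePayload model and payload.image_base64 in the diff). A minimal client sketch, assuming the requests package, a placeholder Space URL, and a hypothetical local image file, none of which are part of this commit:

import base64

import requests  # assumed available on the client

# Placeholder URL; substitute the real Space endpoint.
SPACE_URL = "https://<owner>-<space-name>.hf.space/predict"

# Read a local image and encode it as base64 text.
with open("example.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

# POST the payload shape expected by ImagePayload.
response = requests.post(SPACE_URL, json={"image_base64": image_b64})
print(response.json())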
app.py
CHANGED
@@ -26,7 +26,7 @@ class ImagePayload(BaseModel):
 @app.get("/")
 def home():
     return {
-        "status": "ok",
+        "status": "ok",
         "message": "API is running! Use POST /predict",
         "model_info": {
             "input_shape": input_details[0]['shape'].tolist(),
@@ -40,48 +40,48 @@ def home():
 def preprocess_image(img_bytes, target_size=(224, 224)):
     """
     Preprocesses the image with NumPy and PIL
-
+
     Args:
         img_bytes: bytes of the image
         target_size: (height, width) tuple
-
+
     Returns:
         Preprocessed image as a numpy array
     """
     # Decode the image with PIL
     img = Image.open(io.BytesIO(img_bytes))
-
+
     # Convert to RGB if necessary
     if img.mode != 'RGB':
         img = img.convert('RGB')
-
+
     # Resize
     img = img.resize(target_size, Image.BILINEAR)
-
+
     # Convert to a numpy array
     img_array = np.array(img, dtype=np.float32)
-
+
     # Normalize to [0, 1]
     img_array = img_array / 255.0
-
+
     # Expand dimensions for the batch
     img_array = np.expand_dims(img_array, axis=0)
-
+
     # If this is the INT8 model, convert directly to uint8 [0, 255]
     # The model handles the scaling and zero point internally
     if IS_INT8_MODEL:
         # Go back to the [0, 255] scale and convert to uint8
         img_array = (img_array).astype(np.uint8)
-
+
     return img_array

 def postprocess_output(output):
     """
     Postprocesses the model output
-
+
     Args:
         output: raw model output
-
+
     Returns:
         Probabilities as a list
     """
@@ -91,7 +91,7 @@ def postprocess_output(output):
     if IS_INT8_MODEL:
         # Convert from uint8 [0, 255] to float [0, 1]
         output = output.astype(np.float32)
-
+
     # The model already applies softmax, so just convert to a list
     return output[0].tolist()

@@ -99,32 +99,32 @@ def postprocess_output(output):
 def predict(payload: ImagePayload):
     """
     Prediction endpoint
-
+
     Args:
         payload: JSON with the image in base64
-
+
     Returns:
         Model predictions
     """
     try:
         # Decode the base64 payload
         img_bytes = base64.b64decode(payload.image_base64)
-
+
         # Preprocess the image
         img_array = preprocess_image(img_bytes, target_size=(224, 224))
-
+
         # Inference with AI Edge LiteRT
         litert_interpreter.set_tensor(input_details[0]['index'], img_array)
         litert_interpreter.invoke()
         output = litert_interpreter.get_tensor(output_details[0]['index'])
-
+
         # Postprocess the output
         predictions = postprocess_output(output)
-
+
         # Get the predicted class and its confidence
         predicted_class = int(np.argmax(predictions))
         confidence = float(predictions[predicted_class])
-
+
         return {
             "prediction": predictions,
             "predicted_class": predicted_class,
@@ -135,7 +135,7 @@ def predict(payload: ImagePayload):
                 reverse=True
             )[:5]
         }
-
+
     except Exception as e:
         return {
             "error": str(e),
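The hunks above use several module-level names defined outside the changed region: litert_interpreter, input_details, output_details, and IS_INT8_MODEL. A minimal sketch of how that setup typically looks with the AI Edge LiteRT interpreter; the package import, model path, and uint8-based detection are assumptions, since the actual initialization code is not part of this diff:

import numpy as np
from ai_edge_litert.interpreter import Interpreter  # assumed; tf.lite.Interpreter exposes the same API

# Load the bundled model once at startup and allocate its tensors.
litert_interpreter = Interpreter(model_path="my_classification_model_float16.tflite")
litert_interpreter.allocate_tensors()

# Tensor metadata used by home(), preprocess_image() and predict().
input_details = litert_interpreter.get_input_details()
output_details = litert_interpreter.get_output_details()

# Treat the model as quantized when its input tensor expects uint8 values.
IS_INT8_MODEL = input_details[0]['dtype'] == np.uint8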
my_classification_model_float16.tflite
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:cb372588dfae0c9a39260570e02bb186987ee0cbe00919de0fc344de92d00d80
 size 22375136
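For a Git LFS pointer, the oid is the SHA-256 of the tracked file's contents, so a pulled copy of the model can be checked against the pointer above. A small sketch, assuming the file sits in the working directory:

import hashlib

# Hash the downloaded model and compare it with the oid from the LFS pointer.
with open("my_classification_model_float16.tflite", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

expected = "cb372588dfae0c9a39260570e02bb186987ee0cbe00919de0fc344de92d00d80"
print("match:", digest == expected)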