Upload 8 files
- Dockerfile +15 -0
- app.py +68 -0
- inference_roof_type.py +92 -0
- ocr_proc.py +74 -0
- one_shot_model.py +124 -0
- package.json +8 -0
- requirements.txt +9 -0
- roof_type_cnn_best.pth +3 -0
Dockerfile
ADDED
@@ -0,0 +1,15 @@
+# Base image with Python
+FROM python:3.10-slim
+
+# Set working directory
+WORKDIR /app
+
+# Install dependencies first so the pip layer is cached
+COPY requirements.txt ./
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY . .
+
+# Hugging Face Spaces serves the app on port 7860 ($PORT)
+ENV PORT=7860
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py
ADDED
@@ -0,0 +1,68 @@
+from fastapi import FastAPI, UploadFile, File, HTTPException
+from fastapi.responses import RedirectResponse
+from typing import Dict
+import torch
+import io
+import os
+from PIL import Image
+
+# Local imports
+from ocr_proc import extract_meter_info
+from inference_roof_type import RoofClassifierCNN, transform, CLASS_NAMES, DEVICE
+
+# FastAPI app
+app = FastAPI(
+    title="Electric Meter + Roof Classifier API",
+    description="Electric Meter OCR and Roof Type Classification",
+    version="1.0.0",
+    docs_url="/docs",
+    redoc_url="/redoc",
+    openapi_url="/openapi.json"
+)
+
+# Redirect root to Swagger UI
+@app.get("/", include_in_schema=False)
+async def root():
+    return RedirectResponse(url="/docs")
+
+@app.get("/health", tags=["Health"])
+async def health_check():
+    return {"status": "healthy", "message": "API is running"}
+
+# Load the roof model once (on cold start)
+roof_model = RoofClassifierCNN().to(DEVICE)
+roof_model.load_state_dict(torch.load("roof_type_cnn_best.pth", map_location=DEVICE))
+roof_model.eval()
+
+@app.post("/ocr/meter", tags=["OCR"])
+async def extract_ocr_data(file: UploadFile = File(...)) -> Dict:
+    try:
+        contents = await file.read()
+        extracted_info = extract_meter_info(contents)
+        return {"success": True, "data": extracted_info}
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"OCR processing failed: {str(e)}")
+
+@app.post("/roof/classify", tags=["Roof Type Classifier"])
+async def classify_roof(file: UploadFile = File(...)) -> Dict:
+    try:
+        image = Image.open(io.BytesIO(await file.read())).convert("RGB")
+        img_tensor = transform(image).unsqueeze(0).to(DEVICE)
+
+        with torch.no_grad():
+            outputs = roof_model(img_tensor)
+            _, predicted = torch.max(outputs, 1)
+            class_idx = predicted.item()
+            confidence = torch.softmax(outputs, dim=1)[0][class_idx].item()
+
+        return {
+            "success": True,
+            "predicted_roof_type": CLASS_NAMES[class_idx],
+            "confidence": f"{confidence * 100:.2f}%"
+        }
+
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Roof classification failed: {str(e)}")
+
+# No `if __name__ == "__main__"` block needed here: the Dockerfile starts
+# uvicorn directly (Hugging Face Spaces serves the app on port 7860).
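For local testing, a minimal client-side smoke test of the two endpoints could look like the sketch below. Assumptions: the container is running on localhost:7860, and meter.jpg / roof.jpg are placeholder sample images you supply; the requests library is not part of the Space itself.

import requests

BASE_URL = "http://localhost:7860"

# OCR endpoint: the form field name must be "file" to match UploadFile = File(...)
with open("meter.jpg", "rb") as f:
    print(requests.post(f"{BASE_URL}/ocr/meter", files={"file": f}).json())

# Roof classifier endpoint
with open("roof.jpg", "rb") as f:
    print(requests.post(f"{BASE_URL}/roof/classify", files={"file": f}).json())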
inference_roof_type.py
ADDED
@@ -0,0 +1,92 @@
+import torch
+import torch.nn as nn
+from torchvision import transforms
+from PIL import Image
+import sys
+
+# -------------------------------
+# CONFIG
+# -------------------------------
+IMG_SIZE = 128
+MODEL_PATH = "roof_type_cnn_best.pth"
+DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+CLASS_NAMES = ['flat', 'pitched']  # Must match training order
+
+# -------------------------------
+# MODEL ARCHITECTURE (same as training)
+# -------------------------------
+class RoofClassifierCNN(nn.Module):
+    def __init__(self):
+        super(RoofClassifierCNN, self).__init__()
+        self.net = nn.Sequential(
+            nn.Conv2d(3, 16, kernel_size=3, padding=1),
+            nn.BatchNorm2d(16),
+            nn.ReLU(),
+            nn.MaxPool2d(2),
+
+            nn.Conv2d(16, 32, kernel_size=3, padding=1),
+            nn.BatchNorm2d(32),
+            nn.ReLU(),
+            nn.MaxPool2d(2),
+
+            nn.Conv2d(32, 64, kernel_size=3, padding=1),
+            nn.BatchNorm2d(64),
+            nn.ReLU(),
+            nn.MaxPool2d(2),
+        )
+        self.fc = nn.Sequential(
+            nn.Flatten(),
+            nn.Linear(64 * 16 * 16, 128),
+            nn.ReLU(),
+            nn.Dropout(0.3),
+            nn.Linear(128, 2)
+        )
+
+    def forward(self, x):
+        x = self.net(x)
+        return self.fc(x)
+
+# -------------------------------
+# TRANSFORMS
+# -------------------------------
+transform = transforms.Compose([
+    transforms.Resize((IMG_SIZE, IMG_SIZE)),
+    transforms.ToTensor(),
+    transforms.Normalize([0.5]*3, [0.5]*3)
+])
+
+# -------------------------------
+# LOAD MODEL
+# -------------------------------
+model = RoofClassifierCNN().to(DEVICE)
+model.load_state_dict(torch.load(MODEL_PATH, map_location=DEVICE))
+model.eval()
+
+# -------------------------------
+# PREDICTION FUNCTION
+# -------------------------------
+def predict(image_path):
+    try:
+        image = Image.open(image_path).convert('RGB')
+    except Exception as e:
+        print(f"❌ Failed to open image: {image_path} ({e})")
+        return
+
+    img_tensor = transform(image).unsqueeze(0).to(DEVICE)
+
+    with torch.no_grad():
+        outputs = model(img_tensor)
+        _, predicted = torch.max(outputs, 1)
+        class_idx = predicted.item()
+        confidence = torch.softmax(outputs, dim=1)[0][class_idx].item()
+
+    print(f"✅ Prediction: {CLASS_NAMES[class_idx]} (Confidence: {confidence*100:.2f}%)")
+
+# -------------------------------
+# MAIN
+# -------------------------------
+if __name__ == "__main__":
+    if len(sys.argv) != 2:
+        print("Usage: python inference_roof_type.py <image_path>")
+    else:
+        predict(sys.argv[1])
ocr_proc.py
ADDED
@@ -0,0 +1,74 @@
+import easyocr
+import cv2
+import numpy as np
+import re
+
+reader = easyocr.Reader(['en'])
+
+def preprocess_image(image):
+    # Grayscale + adaptive threshold to sharpen text for OCR
+    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+    thresh = cv2.adaptiveThreshold(
+        gray, 255,
+        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+        cv2.THRESH_BINARY_INV,
+        11, 2
+    )
+    return thresh
+
+def extract_meter_info(image_bytes):
+    # Decode the uploaded bytes into an OpenCV image
+    np_img = np.frombuffer(image_bytes, np.uint8)
+    image = cv2.imdecode(np_img, cv2.IMREAD_COLOR)
+    if image is None:
+        raise ValueError("Could not decode image bytes")
+    processed_image = preprocess_image(image)
+    results = reader.readtext(processed_image)
+    print("[OCR Results]")
+    for bbox, text, conf in results:
+        print(f"Text: {text}, Confidence: {conf:.2f}")
+
+    extracted_info = {
+        "kh": None,
+        "frequency": None,
+        "voltage": None,
+        "serial_number": None,
+        "other_specs": []
+    }
+
+    kh_pattern = re.compile(r"\bK\s*H\s*[:=]?\s*([0-9.]+)", re.IGNORECASE)
+    freq_pattern = re.compile(r"\b([4-6]0)\s*(hz)?\b", re.IGNORECASE)
+    volt_pattern = re.compile(r"\b([1-4][0-9]{2})\s*(v|volt|volts)?\b", re.IGNORECASE)
+    serial_pattern = re.compile(r"\b(?:S/N|SN|Serial\s*(?:No|Number)?[:\s]*)?(\d{6,})\b", re.IGNORECASE)
+
+    for (_, text, _) in results:
+        text_clean = text.strip()
+
+        if not extracted_info["kh"]:
+            if kh_match := kh_pattern.search(text_clean):
+                extracted_info["kh"] = kh_match.group(1)
+
+        if not extracted_info["frequency"]:
+            if freq_match := freq_pattern.search(text_clean):
+                extracted_info["frequency"] = freq_match.group(1)
+
+        if not extracted_info["voltage"]:
+            if volt_match := volt_pattern.search(text_clean):
+                extracted_info["voltage"] = volt_match.group(1)
+
+        if not extracted_info["serial_number"]:
+            if serial_match := serial_pattern.search(text_clean):
+                if not re.search(r"hz|v|kh", text_clean.lower()):
+                    extracted_info["serial_number"] = serial_match.group(1)
+
+        extracted_info["other_specs"].append(text_clean)
+
+    # Normalize output units
+    if extracted_info["voltage"]:
+        extracted_info["voltage"] += " V"
+    if extracted_info["frequency"]:
+        extracted_info["frequency"] += " Hz"
+    if extracted_info["kh"]:
+        extracted_info["kh"] += " Kh"
+
+    return extracted_info
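Since extract_meter_info takes raw bytes, it can also be exercised outside the API — a minimal sketch, assuming a local sample photo at meter.jpg (placeholder path):

from ocr_proc import extract_meter_info

# Read the image as bytes, exactly as the FastAPI endpoint does
with open("meter.jpg", "rb") as f:
    info = extract_meter_info(f.read())

print(info["serial_number"], info["voltage"], info["frequency"], info["kh"])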
one_shot_model.py
ADDED
@@ -0,0 +1,124 @@
+import os
+import torch
+import torch.nn as nn
+import torch.optim as optim
+from torchvision import datasets, transforms
+from torch.utils.data import DataLoader, random_split
+
+# -------------------------------
+# CONFIGURATION
+# -------------------------------
+DATA_DIR = "training"  # Your root dir with 'flat/' and 'pitched/'
+BATCH_SIZE = 16
+EPOCHS = 20
+LR = 0.001
+IMG_SIZE = 128
+DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# -------------------------------
+# TRANSFORMS
+# -------------------------------
+transform = transforms.Compose([
+    transforms.Resize((IMG_SIZE, IMG_SIZE)),
+    transforms.ToTensor(),
+    transforms.Normalize([0.5]*3, [0.5]*3)  # RGB normalization
+])
+
+# -------------------------------
+# DATASET AND LOADERS
+# -------------------------------
+dataset = datasets.ImageFolder(root=DATA_DIR, transform=transform)
+train_len = int(0.8 * len(dataset))
+val_len = len(dataset) - train_len
+train_set, val_set = random_split(dataset, [train_len, val_len])
+train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
+val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False)
+
+# -------------------------------
+# FINAL CNN MODEL
+# -------------------------------
+class RoofClassifierCNN(nn.Module):
+    def __init__(self):
+        super(RoofClassifierCNN, self).__init__()
+        self.net = nn.Sequential(
+            nn.Conv2d(3, 16, kernel_size=3, padding=1),
+            nn.BatchNorm2d(16),
+            nn.ReLU(),
+            nn.MaxPool2d(2),  # -> 16x64x64
+
+            nn.Conv2d(16, 32, kernel_size=3, padding=1),
+            nn.BatchNorm2d(32),
+            nn.ReLU(),
+            nn.MaxPool2d(2),  # -> 32x32x32
+
+            nn.Conv2d(32, 64, kernel_size=3, padding=1),
+            nn.BatchNorm2d(64),
+            nn.ReLU(),
+            nn.MaxPool2d(2),  # -> 64x16x16
+        )
+        self.fc = nn.Sequential(
+            nn.Flatten(),
+            nn.Linear(64 * 16 * 16, 128),
+            nn.ReLU(),
+            nn.Dropout(0.3),
+            nn.Linear(128, 2)
+        )
+
+    def forward(self, x):
+        x = self.net(x)
+        x = self.fc(x)
+        return x
+
+model = RoofClassifierCNN().to(DEVICE)
+
+# -------------------------------
+# LOSS & OPTIMIZER
+# -------------------------------
+criterion = nn.CrossEntropyLoss()
+optimizer = optim.Adam(model.parameters(), lr=LR)
+
+# -------------------------------
+# TRAINING LOOP
+# -------------------------------
+for epoch in range(EPOCHS):
+    model.train()
+    total_loss, correct, total = 0.0, 0, 0
+
+    for imgs, labels in train_loader:
+        imgs, labels = imgs.to(DEVICE), labels.to(DEVICE)
+        optimizer.zero_grad()
+        outputs = model(imgs)
+        loss = criterion(outputs, labels)
+        loss.backward()
+        optimizer.step()
+
+        total_loss += loss.item()
+        _, predicted = outputs.max(1)
+        total += labels.size(0)
+        correct += predicted.eq(labels).sum().item()
+
+    acc = 100 * correct / total
+    print(f"Epoch {epoch+1:02d}/{EPOCHS} - Loss: {total_loss:.4f} - Accuracy: {acc:.2f}%")
+
+# -------------------------------
+# VALIDATION
+# -------------------------------
+model.eval()
+correct = 0
+total = 0
+with torch.no_grad():
+    for imgs, labels in val_loader:
+        imgs, labels = imgs.to(DEVICE), labels.to(DEVICE)
+        outputs = model(imgs)
+        _, predicted = outputs.max(1)
+        total += labels.size(0)
+        correct += predicted.eq(labels).sum().item()
+
+val_acc = 100 * correct / total
+print(f"\n✅ Final Validation Accuracy: {val_acc:.2f}%")
+
+# -------------------------------
+# SAVE MODEL
+# -------------------------------
+torch.save(model.state_dict(), "roof_type_cnn_best.pth")
+print("🧠 Model saved as roof_type_cnn_best.pth")
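Note: datasets.ImageFolder infers class labels from subdirectory names, sorted alphabetically, which is why CLASS_NAMES = ['flat', 'pitched'] in inference_roof_type.py matches the training order. The expected layout (file names illustrative):

training/
    flat/
        img_001.jpg
        ...
    pitched/
        img_101.jpg
        ...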
package.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "builds": [
+    { "src": "main.py", "use": "@vercel/python" }
+  ],
+  "routes": [
+    { "src": "/(.*)", "dest": "main.py" }
+  ]
+}
requirements.txt
ADDED
@@ -0,0 +1,9 @@
+fastapi
+uvicorn
+python-multipart  # required by FastAPI for multipart file uploads
+pillow
+numpy
+opencv-python-headless  # cv2 is imported directly in ocr_proc.py
+easyocr
+torch
+torchvision
roof_type_cnn_best.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3244afb32f0e3faae1195b16058abd19a717982857694cddf26e5cbb51c1d11
+size 8494957