john5050 committed
Commit 76479e1 · 1 Parent(s): 3948724

Made an inference model and added the inference script, requirements.txt, and the Flask app to the repository. The inference script is a Jupyter notebook containing the code for loading the model and making predictions on new images. The Flask app is a simple web application that lets users upload images and get predictions from the model. The requirements.txt file lists the dependencies needed to run the Flask app.
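
As a quick smoke test, a minimal sketch of exercising the upload endpoint once the Flask app below is running (the localhost URL/port come from app.py; the requests library and sample.png are assumptions for illustration, not part of this commit):

    # Hypothetical smoke test for the /predict endpoint; assumes the app
    # is running locally on port 5000 and that sample.png exists.
    import requests

    with open("sample.png", "rb") as f:
        resp = requests.post("http://localhost:5000/predict", files={"image": f})

    data = resp.json()
    print(data["verdict"], "|", data["illegal_count"], "illegal of", data["total_count"], "buildings")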

app.py ADDED
@@ -0,0 +1,173 @@
+ import torch
+ import torch.nn as nn
+ import segmentation_models_pytorch as smp
+ from torchvision import transforms
+ from PIL import Image
+ import numpy as np
+ import base64
+ import io
+ import cv2
+ from flask import Flask, request, jsonify, send_from_directory
+ import os
+
+ app = Flask(__name__, static_folder="static")
+
+ # ── Model — exact same as training ────────────────────────────
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ model = smp.Unet(
+     encoder_name="efficientnet-b3",
+     encoder_weights=None,  # no pretrained weights needed at inference
+     in_channels=3,
+     classes=1,
+     activation=None,  # raw logits, same as training
+ )
+
+ MODEL_PATH = "best_model.pth"
+ if os.path.exists(MODEL_PATH):
+     model.load_state_dict(torch.load(MODEL_PATH, map_location=device, weights_only=True))
+     print(f"✅ Model loaded — device: {device}")
+ else:
+     print("⚠️ best_model.pth not found — running in demo mode")
+
+ model.to(device)
+ model.eval()
+
+ # ── Preprocessing — matches val_transform from training ───────
+ mean = np.array([0.485, 0.456, 0.406])
+ std = np.array([0.229, 0.224, 0.225])
+
+
+ def preprocess(pil_img, patch_size=256):
+     """Pad to the next multiple of patch_size, normalize, tensorize."""
+     img = np.array(pil_img.convert("RGB"))
+     h, w = img.shape[:2]
+
+     # Pad to multiple of patch_size
+     new_h = ((h + patch_size - 1) // patch_size) * patch_size
+     new_w = ((w + patch_size - 1) // patch_size) * patch_size
+     padded = np.zeros((new_h, new_w, 3), dtype=np.float32)
+     padded[:h, :w] = img
+
+     # Normalize
+     padded = padded / 255.0
+     padded = (padded - mean) / std
+
+     tensor = torch.tensor(padded).permute(2, 0, 1).float().unsqueeze(0)
+     return tensor, (h, w)
+
+
+ def run_inference(pil_img, patch_size=256):
+     """Run patch-based inference matching training patch extraction."""
+     img = np.array(pil_img.convert("RGB"))
+     h, w = img.shape[:2]
+
+     # Pad to multiple of patch_size
+     new_h = ((h + patch_size - 1) // patch_size) * patch_size
+     new_w = ((w + patch_size - 1) // patch_size) * patch_size
+     padded = np.zeros((new_h, new_w, 3), dtype=np.uint8)
+     padded[:h, :w] = img
+
+     full_mask = np.zeros((new_h, new_w), dtype=np.float32)
+
+     for i in range(0, new_h, patch_size):
+         for j in range(0, new_w, patch_size):
+             patch = (
+                 padded[i : i + patch_size, j : j + patch_size].astype(np.float32)
+                 / 255.0
+             )
+             patch = (patch - mean) / std
+             tensor = (
+                 torch.tensor(patch).permute(2, 0, 1).float().unsqueeze(0).to(device)
+             )
+
+             with torch.no_grad():
+                 out = model(tensor)
+                 prob = torch.sigmoid(out).squeeze().cpu().numpy()
+
+             full_mask[i : i + patch_size, j : j + patch_size] = prob
+
+     return full_mask[:h, :w], img
+
+
+ # ── Zoning + illegal detection (from your notebook) ───────────
+ def create_zoning_mask(shape):  # 1 = restricted zone; default: right half
+     h, w = shape
+     zoning = np.zeros((h, w), dtype=np.uint8)
+     zoning[:, w // 2 :] = 1
+     return zoning
+
+
+ def detect_illegal_buildings(binary_mask, zoning_mask):  # flag blobs touching the restricted zone
+     num_labels, labels = cv2.connectedComponents(binary_mask.astype(np.uint8))
+     illegal, legal = [], []
+     for label in range(1, num_labels):
+         building_pixels = labels == label
+         if (building_pixels & (zoning_mask == 1)).any():
+             illegal.append(label)
+         else:
+             legal.append(label)
+     return illegal, legal, labels
+
+
+ def to_base64(arr_or_img):
+     if isinstance(arr_or_img, np.ndarray):
+         img = Image.fromarray(arr_or_img.astype(np.uint8))
+     else:
+         img = arr_or_img
+     buf = io.BytesIO()
+     img.save(buf, format="PNG")
+     return base64.b64encode(buf.getvalue()).decode()
+
+
+ # ── Routes ────────────────────────────────────────────────────
+ @app.route("/")
+ def index():
+     return send_from_directory("static", "index.html")
+
+
+ @app.route("/predict", methods=["POST"])
+ def predict():
+     if "image" not in request.files:
+         return jsonify({"error": "No image provided"}), 400
+
+     file = request.files["image"]
+     pil_img = Image.open(file.stream).convert("RGB")
+
+     # Run patch-based segmentation
+     prob_mask, orig_rgb = run_inference(pil_img, patch_size=256)
+     binary_mask = (prob_mask > 0.5).astype(np.uint8)
+
+     # Zoning-based illegal detection
+     zoning_mask = create_zoning_mask(binary_mask.shape)
+     illegal, legal, labels = detect_illegal_buildings(binary_mask, zoning_mask)
+
+     # Build overlay: illegal=red, legal=green
+     overlay = orig_rgb.copy()
+     for lbl in illegal:
+         overlay[labels == lbl] = [255, 0, 0]
+     for lbl in legal:
+         overlay[labels == lbl] = [0, 200, 100]
+
+     total = len(illegal) + len(legal)
+     illegal_pct = round(float(binary_mask.mean() * 100), 2)  # % of image area covered by detected buildings
+     verdict = "ILLEGAL CONSTRUCTION DETECTED" if illegal else "NO VIOLATION DETECTED"
+
+     return jsonify(
+         {
+             "verdict": verdict,
+             "illegal_count": len(illegal),
+             "legal_count": len(legal),
+             "total_count": total,
+             "illegal_percent": illegal_pct,
+             "device": str(device),
+             "original": to_base64(orig_rgb),
+             "mask": to_base64((prob_mask * 255).astype(np.uint8)),
+             "overlay": to_base64(overlay),
+         }
+     )
+
+
+ if __name__ == "__main__":
+     print(f"Running on: {device}")
+     app.run(debug=True, port=5000)
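
For intuition, the pad-to-multiple arithmetic in run_inference (mirrored in the notebook below) rounds each dimension up to the next multiple of the patch size before tiling; a small traced example (the 600x900 size is arbitrary, chosen only for illustration):

    # Ceil-to-multiple padding as used in run_inference above
    patch_size = 256
    h, w = 600, 900  # example image size
    new_h = ((h + patch_size - 1) // patch_size) * patch_size  # 768  -> 3 patch rows
    new_w = ((w + patch_size - 1) // patch_size) * patch_size  # 1024 -> 4 patch cols
    assert (new_h, new_w) == (768, 1024)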
best_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c65c14958276bafc09cb99befaaffcbc7ace552bf1b7dfdc7204c499f4056d22
+ size 53223435
inference script.ipynb ADDED
@@ -0,0 +1,227 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "5d4984fd",
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Note: you may need to restart the kernel to use updated packages.\n"
+      ]
+     },
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
+       "gradio 5.43.1 requires fastapi<1.0,>=0.115.2, but you have fastapi 0.115.0 which is incompatible.\n",
+       "gradio 5.43.1 requires pydantic<2.12,>=2.0, but you have pydantic 2.12.5 which is incompatible.\n",
+       "gradio 5.43.1 requires starlette<1.0,>=0.40.0; sys_platform != \"emscripten\", but you have starlette 0.38.6 which is incompatible.\n"
+      ]
+     }
+    ],
+    "source": [
+     "%pip install -q torch torchvision pillow opencv-python segmentation-models-pytorch albumentations"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 2,
+    "id": "8e934e27",
+    "metadata": {},
+    "outputs": [
+     {
+      "ename": "OSError",
+      "evalue": "[WinError 126] The specified module could not be found. Error loading \"c:\\Users\\abhay\\anaconda3\\Lib\\site-packages\\torch\\lib\\fbgemm.dll\" or one of its dependencies.",
+      "output_type": "error",
+      "traceback": [
+       "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+       "\u001b[1;31mOSError\u001b[0m Traceback (most recent call last)",
+       "Cell \u001b[1;32mIn[2], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mtorch\u001b[39;00m\n\u001b[0;32m 2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mtorchvision\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtransforms\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mtransforms\u001b[39;00m\n\u001b[0;32m 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mPIL\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Image\n",
+       "File \u001b[1;32mc:\\Users\\abhay\\anaconda3\\Lib\\site-packages\\torch\\__init__.py:148\u001b[0m\n\u001b[0;32m 146\u001b[0m err \u001b[38;5;241m=\u001b[39m ctypes\u001b[38;5;241m.\u001b[39mWinError(ctypes\u001b[38;5;241m.\u001b[39mget_last_error())\n\u001b[0;32m 147\u001b[0m err\u001b[38;5;241m.\u001b[39mstrerror \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m Error loading \u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mdll\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m or one of its dependencies.\u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m--> 148\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m err\n\u001b[0;32m 150\u001b[0m kernel32\u001b[38;5;241m.\u001b[39mSetErrorMode(prev_error_mode)\n\u001b[0;32m 153\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_preload_cuda_deps\u001b[39m(lib_folder, lib_name):\n",
+       "\u001b[1;31mOSError\u001b[0m: [WinError 126] The specified module could not be found. Error loading \"c:\\Users\\abhay\\anaconda3\\Lib\\site-packages\\torch\\lib\\fbgemm.dll\" or one of its dependencies."
+      ]
+     }
+    ],
+    "source": [
+     "import torch\n",
+     "import torchvision.transforms as transforms\n",
+     "from PIL import Image\n",
+     "import cv2\n",
+     "import numpy as np\n",
+     "import matplotlib.pyplot as plt\n",
+     "import segmentation_models_pytorch as smp\n",
+     "import albumentations as A\n",
+     "from albumentations.pytorch import ToTensorV2\n",
+     "\n",
+     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
+     "print(\"Using device:\", device)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "f37c3d10",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# ── Load trained model ──────────────────────────────────────────────────────\n",
+     "MODEL_PATH = \"best_model.pth\"  # path to saved weights\n",
+     "\n",
+     "model = smp.Unet(\n",
+     "    encoder_name=\"efficientnet-b3\",\n",
+     "    encoder_weights=None,  # weights loaded from checkpoint\n",
+     "    in_channels=3,\n",
+     "    classes=1,\n",
+     "    activation=None\n",
+     ")\n",
+     "model.load_state_dict(torch.load(MODEL_PATH, map_location=device))\n",
+     "model.to(device)\n",
+     "model.eval()\n",
+     "print(\"Model loaded successfully.\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "c63c88a3",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# ── Preprocess & run inference ───────────────────────────────────────────────\n",
+     "IMAGE_PATH = r\"test images\\test 1.png\"  # test image\n",
+     "PATCH_SIZE = 256\n",
+     "\n",
+     "transform = A.Compose([\n",
+     "    A.Normalize(mean=(0.485, 0.456, 0.406),\n",
+     "                std=(0.229, 0.224, 0.225)),\n",
+     "    ToTensorV2()\n",
+     "])\n",
+     "\n",
+     "# Load image\n",
+     "img_bgr = cv2.imread(IMAGE_PATH)\n",
+     "assert img_bgr is not None, f\"Could not read image: {IMAGE_PATH}\"\n",
+     "img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)\n",
+     "\n",
+     "h, w = img_rgb.shape[:2]\n",
+     "\n",
+     "# Pad so dimensions are divisible by PATCH_SIZE\n",
+     "pad_h = (PATCH_SIZE - h % PATCH_SIZE) % PATCH_SIZE\n",
+     "pad_w = (PATCH_SIZE - w % PATCH_SIZE) % PATCH_SIZE\n",
+     "img_padded = np.pad(img_rgb, ((0, pad_h), (0, pad_w), (0, 0)), mode='reflect')\n",
+     "H, W = img_padded.shape[:2]\n",
+     "\n",
+     "# Stitch patch predictions into a full mask\n",
+     "full_mask = np.zeros((H, W), dtype=np.float32)\n",
+     "\n",
+     "with torch.no_grad():\n",
+     "    for i in range(0, H, PATCH_SIZE):\n",
+     "        for j in range(0, W, PATCH_SIZE):\n",
+     "            patch = img_padded[i:i+PATCH_SIZE, j:j+PATCH_SIZE]\n",
+     "            tensor = transform(image=patch)[\"image\"].unsqueeze(0).to(device)\n",
+     "            pred = torch.sigmoid(model(tensor)).squeeze().cpu().numpy()\n",
+     "            full_mask[i:i+PATCH_SIZE, j:j+PATCH_SIZE] = pred\n",
+     "\n",
+     "# Crop back to original size\n",
+     "pred_mask = (full_mask[:h, :w] > 0.5).astype(np.uint8)\n",
+     "print(f\"Inference done. Image size: {h}×{w} | Buildings detected: {pred_mask.sum()>0}\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "ffbae7d8",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# ── Zoning mask & illegal building detection ─────────────────────────────────\n",
+     "# Default zoning: right half is restricted. Modify as needed.\n",
+     "def create_zoning_mask(shape):\n",
+     "    \"\"\"Returns a binary mask (1 = restricted zone).\"\"\"\n",
+     "    zm = np.zeros(shape, dtype=np.uint8)\n",
+     "    zm[:, shape[1] // 2:] = 1\n",
+     "    return zm\n",
+     "\n",
+     "def detect_illegal_buildings(building_mask, zoning_mask):\n",
+     "    num_labels, labels = cv2.connectedComponents(building_mask)\n",
+     "    illegal, legal = [], []\n",
+     "    for lbl in range(1, num_labels):\n",
+     "        pixels = (labels == lbl)\n",
+     "        if (pixels & (zoning_mask == 1)).any():\n",
+     "            illegal.append(lbl)\n",
+     "        else:\n",
+     "            legal.append(lbl)\n",
+     "    return illegal, legal, labels\n",
+     "\n",
+     "def overlay_illegal(image_rgb, labels, illegal_buildings):\n",
+     "    out = image_rgb.copy()\n",
+     "    for lbl in illegal_buildings:\n",
+     "        out[labels == lbl] = [255, 0, 0]  # red highlight\n",
+     "    return out\n",
+     "\n",
+     "zoning_mask = create_zoning_mask(pred_mask.shape)\n",
+     "illegal, legal, labels = detect_illegal_buildings(pred_mask, zoning_mask)\n",
+     "overlay = overlay_illegal(img_rgb, labels, illegal)\n",
+     "\n",
+     "print(f\"Total buildings  : {len(illegal) + len(legal)}\")\n",
+     "print(f\"Illegal buildings: {len(illegal)}\")\n",
+     "print(f\"Legal buildings  : {len(legal)}\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "6692053a",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# ── Visualize results ────────────────────────────────────────────────────────\n",
+     "fig, axes = plt.subplots(1, 4, figsize=(20, 5))\n",
+     "\n",
+     "axes[0].imshow(img_rgb)\n",
+     "axes[0].set_title(\"Input Image\")\n",
+     "axes[0].axis(\"off\")\n",
+     "\n",
+     "axes[1].imshow(pred_mask, cmap=\"gray\")\n",
+     "axes[1].set_title(\"Building Mask\")\n",
+     "axes[1].axis(\"off\")\n",
+     "\n",
+     "axes[2].imshow(zoning_mask, cmap=\"gray\")\n",
+     "axes[2].set_title(\"Zoning Mask\\n(white = restricted)\")\n",
+     "axes[2].axis(\"off\")\n",
+     "\n",
+     "axes[3].imshow(overlay)\n",
+     "axes[3].set_title(f\"Illegal Buildings (red)\\nIllegal: {len(illegal)} | Legal: {len(legal)}\")\n",
+     "axes[3].axis(\"off\")\n",
+     "\n",
+     "plt.tight_layout()\n",
+     "plt.show()"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": ".venv",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.12.3"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+ }
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ flask
+ torch
+ torchvision
+ segmentation-models-pytorch
+ albumentations
+ opencv-python
+ pillow
+ numpy
static/index.html ADDED
@@ -0,0 +1,272 @@
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+   <meta charset="UTF-8"/>
+   <meta name="viewport" content="width=device-width, initial-scale=1.0"/>
+   <title>ConstructScan — Illegal Construction Detector</title>
+   <link href="https://fonts.googleapis.com/css2?family=Bebas+Neue&family=DM+Mono:wght@400;500&family=DM+Sans:wght@300;400;500&display=swap" rel="stylesheet"/>
+   <style>
+     :root {
+       --bg: #0a0a0a;
+       --surface: #111111;
+       --surface2: #1a1a1a;
+       --border: #2a2a2a;
+       --accent: #ff3c00;
+       --text: #f0ede8;
+       --muted: #666;
+       --safe: #00e676;
+       --danger: #ff3c00;
+     }
+     * { margin:0; padding:0; box-sizing:border-box; }
+     body { background:var(--bg); color:var(--text); font-family:'DM Sans',sans-serif; min-height:100vh; }
+     body::before {
+       content:''; position:fixed; inset:0; pointer-events:none; z-index:999;
+       background-image:url("data:image/svg+xml,%3Csvg viewBox='0 0 256 256' xmlns='http://www.w3.org/2000/svg'%3E%3Cfilter id='n'%3E%3CfeTurbulence type='fractalNoise' baseFrequency='0.9' numOctaves='4' stitchTiles='stitch'/%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23n)' opacity='0.03'/%3E%3C/svg%3E");
+     }
+     header {
+       border-bottom:1px solid var(--border); padding:1.2rem 2.5rem;
+       display:flex; align-items:center; justify-content:space-between;
+       position:sticky; top:0; background:rgba(10,10,10,0.96); backdrop-filter:blur(12px); z-index:100;
+     }
+     .logo { font-family:'Bebas Neue',sans-serif; font-size:1.7rem; letter-spacing:.1em; }
+     .logo span { color:var(--accent); }
+     .badge {
+       font-family:'DM Mono',monospace; font-size:.65rem; letter-spacing:.15em;
+       text-transform:uppercase; padding:.3rem .7rem; border:1px solid var(--border); color:var(--muted);
+     }
+     main { max-width:1100px; margin:0 auto; padding:3.5rem 2rem; }
+     .hero { margin-bottom:3rem; }
+     .hero h1 {
+       font-family:'Bebas Neue',sans-serif; font-size:clamp(2.8rem,7vw,6rem);
+       line-height:.92; letter-spacing:.02em; margin-bottom:1.2rem;
+     }
+     .hero h1 em { font-style:normal; color:var(--accent); display:block; }
+     .hero p { font-size:.95rem; color:var(--muted); max-width:460px; line-height:1.7; font-weight:300; }
+
+     .upload-zone {
+       border:1px dashed var(--border); padding:2.5rem 2rem; text-align:center;
+       cursor:pointer; background:var(--surface); position:relative; transition:all .2s;
+       margin-bottom:1rem;
+     }
+     .upload-zone:hover, .upload-zone.drag-over { border-color:var(--accent); background:#1a0906; }
+     .upload-zone input { position:absolute; inset:0; opacity:0; cursor:pointer; width:100%; height:100%; }
+     .upload-zone svg { width:40px; height:40px; margin:0 auto .8rem; opacity:.35; display:block; }
+     .upload-zone h3 {
+       font-family:'DM Mono',monospace; font-size:.8rem; letter-spacing:.1em;
+       text-transform:uppercase; color:var(--muted); margin-bottom:.4rem;
+     }
+     .upload-zone p { font-size:.75rem; color:#444; }
+
+     #preview-wrap { display:none; margin-bottom:1rem; position:relative; }
+     #preview-img { width:100%; max-height:280px; object-fit:cover; display:block; }
+     .preview-tag {
+       position:absolute; top:.8rem; left:.8rem; font-family:'DM Mono',monospace;
+       font-size:.6rem; letter-spacing:.15em; text-transform:uppercase;
+       background:rgba(0,0,0,.85); padding:.25rem .55rem; color:var(--muted);
+     }
+
+     .btn {
+       width:100%; padding:1.1rem; background:var(--accent); color:#fff; border:none;
+       font-family:'Bebas Neue',sans-serif; font-size:1.3rem; letter-spacing:.15em;
+       cursor:pointer; transition:background .2s; display:flex; align-items:center; justify-content:center; gap:.7rem;
+     }
+     .btn:hover { background:#e03500; }
+     .btn:disabled { background:#333; color:#555; cursor:not-allowed; }
+     .spinner {
+       width:18px; height:18px; border:2px solid rgba(255,255,255,.25); border-top-color:#fff;
+       border-radius:50%; animation:spin .7s linear infinite; display:none;
+     }
+     @keyframes spin { to { transform:rotate(360deg); } }
+
+     #error { display:none; margin-top:.8rem; padding:.9rem 1.2rem; background:#1a0500; border-left:3px solid var(--danger); font-family:'DM Mono',monospace; font-size:.75rem; color:var(--danger); }
+
+     #results { display:none; margin-top:2.5rem; animation:fadeUp .45s ease forwards; }
+     @keyframes fadeUp { from { opacity:0; transform:translateY(18px); } to { opacity:1; transform:translateY(0); } }
+
+     .verdict-bar {
+       padding:1.8rem 2rem; margin-bottom:1px; display:flex;
+       align-items:center; justify-content:space-between; gap:1.5rem; flex-wrap:wrap;
+     }
+     .verdict-bar.danger { background:#1a0400; border-left:4px solid var(--danger); }
+     .verdict-bar.safe { background:#001508; border-left:4px solid var(--safe); }
+     .verdict-label { font-family:'Bebas Neue',sans-serif; font-size:clamp(1.4rem,3.5vw,2.5rem); letter-spacing:.04em; }
+     .verdict-bar.danger .verdict-label { color:var(--danger); }
+     .verdict-bar.safe .verdict-label { color:var(--safe); }
+     .verdict-meta { display:flex; gap:2rem; }
+     .vmeta-item { text-align:right; }
+     .vmeta-item .num { font-family:'Bebas Neue',sans-serif; font-size:2.2rem; line-height:1; }
+     .vmeta-item .key { font-family:'DM Mono',monospace; font-size:.6rem; letter-spacing:.12em; text-transform:uppercase; color:var(--muted); }
+
+     .grid3 { display:grid; grid-template-columns:repeat(3,1fr); gap:1px; background:var(--border); margin-bottom:1px; }
+     .img-panel { background:var(--surface); overflow:hidden; }
+     .panel-label {
+       padding:.6rem 1rem; font-family:'DM Mono',monospace; font-size:.6rem;
+       letter-spacing:.14em; text-transform:uppercase; color:var(--muted);
+       border-bottom:1px solid var(--border); display:flex; align-items:center; gap:.4rem;
+     }
+     .dot { width:5px; height:5px; border-radius:50%; background:var(--accent); }
+     .dot.g { background:var(--safe); }
+     .img-panel img { width:100%; aspect-ratio:1; object-fit:cover; display:block; }
+
+     .stats4 { display:grid; grid-template-columns:repeat(4,1fr); gap:1px; background:var(--border); }
+     .stat { background:var(--surface2); padding:1.3rem 1.5rem; }
+     .stat .v { font-family:'Bebas Neue',sans-serif; font-size:1.8rem; color:var(--text); line-height:1; margin-bottom:.25rem; }
+     .stat .k { font-family:'DM Mono',monospace; font-size:.6rem; letter-spacing:.12em; text-transform:uppercase; color:var(--muted); }
+
+     @media(max-width:700px) {
+       header { padding:1rem; }
+       main { padding:2rem 1rem; }
+       .grid3, .stats4 { grid-template-columns:1fr; }
+       .verdict-meta { gap:1rem; }
+     }
+   </style>
+ </head>
+ <body>
+
+   <header>
+     <div class="logo">Construct<span>Scan</span></div>
+     <div class="badge">EfficientNet-B3 · U-Net · SMP</div>
+   </header>
+
+   <main>
+     <div class="hero">
+       <h1>Detect <em>Illegal</em> Construction</h1>
+       <p>Upload a satellite or aerial image. The model segments buildings, then flags those in restricted zones as illegal.</p>
+     </div>
+
+     <div class="upload-zone" id="upload-zone">
+       <input type="file" id="file-input" accept="image/*"/>
+       <svg viewBox="0 0 48 48" fill="none" stroke="currentColor" stroke-width="1.5">
+         <rect x="4" y="4" width="40" height="40" rx="2"/>
+         <path d="M24 32V16M16 24l8-8 8 8"/>
+       </svg>
+       <h3>Drop image here or click to upload</h3>
+       <p>Satellite / aerial imagery — JPG, PNG, TIFF</p>
+     </div>
+
+     <div id="preview-wrap">
+       <img id="preview-img" src="" alt="Preview"/>
+       <span class="preview-tag">Input</span>
+     </div>
+
+     <button class="btn" id="analyze-btn" disabled>
+       <div class="spinner" id="spinner"></div>
+       <span id="btn-text">ANALYZE IMAGE</span>
+     </button>
+
+     <div id="error"></div>
+
+     <div id="results">
+       <div class="verdict-bar" id="verdict-bar">
+         <div class="verdict-label" id="verdict-label"></div>
+         <div class="verdict-meta">
+           <div class="vmeta-item">
+             <div class="num" id="vm-illegal">0</div>
+             <div class="key">Illegal Buildings</div>
+           </div>
+           <div class="vmeta-item">
+             <div class="num" id="vm-total">0</div>
+             <div class="key">Total Buildings</div>
+           </div>
+         </div>
+       </div>
+
+       <div class="grid3">
+         <div class="img-panel">
+           <div class="panel-label"><span class="dot g"></span> Original</div>
+           <img id="out-orig" src="" alt="Original"/>
+         </div>
+         <div class="img-panel">
+           <div class="panel-label"><span class="dot"></span> Segmentation Mask</div>
+           <img id="out-mask" src="" alt="Mask"/>
+         </div>
+         <div class="img-panel">
+           <div class="panel-label"><span class="dot"></span> Illegal Overlay</div>
+           <img id="out-overlay" src="" alt="Overlay"/>
+         </div>
+       </div>
+
+       <div class="stats4">
+         <div class="stat"><div class="v" id="s-illegal">—</div><div class="k">Illegal Buildings</div></div>
+         <div class="stat"><div class="v" id="s-legal">—</div><div class="k">Legal Buildings</div></div>
+         <div class="stat"><div class="v" id="s-pct">—</div><div class="k">Area Flagged %</div></div>
+         <div class="stat"><div class="v" id="s-device">—</div><div class="k">Inference Device</div></div>
+       </div>
+     </div>
+   </main>
+
+   <script>
+     const fileInput = document.getElementById('file-input');
+     const uploadZone = document.getElementById('upload-zone');
+     const previewWrap = document.getElementById('preview-wrap');
+     const previewImg = document.getElementById('preview-img');
+     const analyzeBtn = document.getElementById('analyze-btn');
+     const spinner = document.getElementById('spinner');
+     const btnText = document.getElementById('btn-text');
+     const errorDiv = document.getElementById('error');
+     const results = document.getElementById('results');
+     let selectedFile = null;
+
+     uploadZone.addEventListener('dragover', e => { e.preventDefault(); uploadZone.classList.add('drag-over'); });
+     uploadZone.addEventListener('dragleave', () => uploadZone.classList.remove('drag-over'));
+     uploadZone.addEventListener('drop', e => {
+       e.preventDefault(); uploadZone.classList.remove('drag-over');
+       if (e.dataTransfer.files[0]) handleFile(e.dataTransfer.files[0]);
+     });
+     fileInput.addEventListener('change', () => { if (fileInput.files[0]) handleFile(fileInput.files[0]); });
+
+     function handleFile(file) {
+       selectedFile = file;
+       const r = new FileReader();
+       r.onload = e => { previewImg.src = e.target.result; previewWrap.style.display = 'block'; };
+       r.readAsDataURL(file);
+       analyzeBtn.disabled = false;
+       results.style.display = 'none';
+       errorDiv.style.display = 'none';
+     }
+
+     analyzeBtn.addEventListener('click', async () => {
+       if (!selectedFile) return;
+       analyzeBtn.disabled = true;
+       spinner.style.display = 'block';
+       btnText.textContent = 'ANALYZING...';
+       errorDiv.style.display = 'none';
+       results.style.display = 'none';
+
+       const fd = new FormData();
+       fd.append('image', selectedFile);
+
+       try {
+         const res = await fetch('/predict', { method:'POST', body:fd });
+         const d = await res.json();
+         if (d.error) throw new Error(d.error);
+
+         const isIllegal = d.illegal_count > 0;
+         const bar = document.getElementById('verdict-bar');
+         bar.className = 'verdict-bar ' + (isIllegal ? 'danger' : 'safe');
+         document.getElementById('verdict-label').textContent = d.verdict;
+         document.getElementById('vm-illegal').textContent = d.illegal_count;
+         document.getElementById('vm-total').textContent = d.total_count;
+
+         document.getElementById('out-orig').src = 'data:image/png;base64,' + d.original;
+         document.getElementById('out-mask').src = 'data:image/png;base64,' + d.mask;
+         document.getElementById('out-overlay').src = 'data:image/png;base64,' + d.overlay;
+
+         document.getElementById('s-illegal').textContent = d.illegal_count;
+         document.getElementById('s-legal').textContent = d.legal_count;
+         document.getElementById('s-pct').textContent = d.illegal_percent + '%';
+         document.getElementById('s-device').textContent = d.device.toUpperCase();
+
+         results.style.display = 'block';
+       } catch(err) {
+         errorDiv.textContent = '⚠ ' + (err.message || 'Server error. Is Flask running?');
+         errorDiv.style.display = 'block';
+       } finally {
+         analyzeBtn.disabled = false;
+         spinner.style.display = 'none';
+         btnText.textContent = 'ANALYZE AGAIN';
+       }
+     });
+   </script>
+ </body>
+ </html>
test images/test 1.png ADDED

Git LFS Details

  • SHA256: 6e9182decda453629e6d8b7461bf994d198869830892e0fd8ccd31513250b534
  • Pointer size: 132 Bytes
  • Size of remote file: 2.5 MB