DefendIntelligence committed (verified)
Commit ecb622f · 1 Parent(s): abc8b09

Add local install bootstrap and GitHub links

Files changed (7)
  1. .gitignore +3 -0
  2. README.md +41 -5
  3. app.py +11 -2
  4. models/.gitkeep +1 -0
  5. run_local.py +100 -0
  6. start.ps1 +7 -0
  7. start.sh +8 -0
.gitignore CHANGED
@@ -1,3 +1,6 @@
 __pycache__/
 *.pyc
 *.log
+.venv/
+models/*.pt
+!models/.gitkeep
README.md CHANGED
@@ -11,9 +11,15 @@ license: mit
 
 Gradio Space for detecting vessels in satellite imagery with a fine-tuned YOLOv8 model.
 
-## Included Model
+## Links
 
-- File: `models/best.pt`
+- Live Space: https://huggingface.co/spaces/DefendIntelligence/vessel-detection
+- Model repository: https://huggingface.co/DefendIntelligence/vessel-detection
+- Direct model download: https://huggingface.co/DefendIntelligence/vessel-detection/resolve/main/models/best.pt
+
+## Model
+
+- Local file expected by the app: `models/best.pt`
 - Checkpoint source: `train-20260417T124314Z-fad9d3ed_best.pt`
 - Run source: `infer-b88a2887`
 - Training name: `super-visible-y8s-newlabels-focuslite-e45`
@@ -21,7 +27,39 @@ Gradio Space for detecting vessels in satellite imagery with a fine-tuned YOLOv8
 - Main dataset: `sentinel-2-rgb`
 - Local index mAP50: `0.7912`
 
-## Usage
+The GitHub repository does not store `best.pt`. Use the bootstrap command below and it will download the model from Hugging Face.
+
+## Run Locally
+
+```bash
+git clone https://github.com/anisayari/vessel-detection.git
+cd vessel-detection
+python run_local.py
+```
+
+Windows shortcut:
+
+```powershell
+.\start.ps1
+```
+
+macOS/Linux shortcut:
+
+```bash
+bash start.sh
+```
+
+The script creates a local `.venv`, installs `requirements.txt`, downloads `models/best.pt` from Hugging Face, then starts Gradio at `http://127.0.0.1:7860`.
+
+Useful options:
+
+```bash
+python run_local.py --download-only
+python run_local.py --skip-install
+python run_local.py --host 0.0.0.0 --port 7860
+```
+
+## Use The App
 
 1. Upload an RGB satellite image or select an example.
 2. Adjust the confidence threshold if needed.
@@ -31,8 +69,6 @@ The app tiles large images before inference so small vessels remain visible to t
 
 ## Hugging Face Deployment
 
-Depuis ce dossier:
-
 ```bash
 git init
 git lfs install
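For readers who prefer to fetch the checkpoint without the bootstrap script, here is a minimal sketch using the `huggingface_hub` client. It is not part of this commit: it assumes `huggingface_hub` is installed separately, and it simply targets the same `models/best.pt` path the app expects.

```python
# Minimal sketch, assuming `pip install huggingface_hub` has been run.
# Downloads models/best.pt from the model repo into the current checkout,
# mirroring what run_local.py does with urllib.
from huggingface_hub import hf_hub_download

hf_hub_download(
    repo_id="DefendIntelligence/vessel-detection",
    filename="models/best.pt",
    local_dir=".",  # resolves to ./models/best.pt, the path app.py expects
)
```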
app.py CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import os
 from functools import lru_cache
 from pathlib import Path
 
@@ -10,6 +11,7 @@ from ultralytics import YOLO
 
 ROOT = Path(__file__).resolve().parent
 MODEL_PATH = ROOT / "models" / "best.pt"
+MODEL_URL = "https://huggingface.co/DefendIntelligence/vessel-detection/resolve/main/models/best.pt"
 EXAMPLES_DIR = ROOT / "examples"
 MAX_TILES = 196
 BATCH_SIZE = 8
@@ -18,7 +20,9 @@ BATCH_SIZE = 8
 @lru_cache(maxsize=1)
 def load_model() -> YOLO:
     if not MODEL_PATH.exists():
-        raise FileNotFoundError(f"Model not found: {MODEL_PATH}")
+        raise FileNotFoundError(
+            f"Model not found: {MODEL_PATH}. Run `python run_local.py` or download it from {MODEL_URL}."
+        )
     return YOLO(str(MODEL_PATH))
 
 
@@ -266,4 +270,9 @@ with gr.Blocks(title="Vessel Detection") as demo:
 
 
 if __name__ == "__main__":
-    demo.launch()
+    launch_kwargs = {}
+    if os.environ.get("GRADIO_SERVER_NAME"):
+        launch_kwargs["server_name"] = os.environ["GRADIO_SERVER_NAME"]
+    if os.environ.get("GRADIO_SERVER_PORT"):
+        launch_kwargs["server_port"] = int(os.environ["GRADIO_SERVER_PORT"])
+    demo.launch(**launch_kwargs)
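The new `__main__` block reads `GRADIO_SERVER_NAME` and `GRADIO_SERVER_PORT` from the environment, which is how `run_local.py` passes `--host`/`--port` through. A minimal sketch of driving that override directly (assuming dependencies are already installed in the active interpreter; the host and port values are examples):

```python
# Minimal sketch: launch app.py on a custom interface/port via the same
# environment variables run_local.py sets before calling app.py.
import os
import subprocess

env = os.environ.copy()
env["GRADIO_SERVER_NAME"] = "0.0.0.0"  # listen on all interfaces
env["GRADIO_SERVER_PORT"] = "7861"     # any free port
subprocess.check_call(["python", "app.py"], env=env)
```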
models/.gitkeep ADDED
@@ -0,0 +1 @@
+
run_local.py ADDED
@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+import argparse
+import os
+import subprocess
+import urllib.request
+import venv
+from pathlib import Path
+
+
+ROOT = Path(__file__).resolve().parent
+VENV_DIR = ROOT / ".venv"
+MODEL_DIR = ROOT / "models"
+MODEL_PATH = MODEL_DIR / "best.pt"
+MODEL_URL = "https://huggingface.co/DefendIntelligence/vessel-detection/resolve/main/models/best.pt"
+
+
+def _venv_python() -> Path:
+    if os.name == "nt":
+        return VENV_DIR / "Scripts" / "python.exe"
+    return VENV_DIR / "bin" / "python"
+
+
+def _run(command: list[str | os.PathLike[str]], env: dict[str, str] | None = None) -> None:
+    printable = " ".join(str(part) for part in command)
+    print(f"\n$ {printable}", flush=True)
+    subprocess.check_call([str(part) for part in command], cwd=ROOT, env=env)
+
+
+def _ensure_venv() -> Path:
+    python_path = _venv_python()
+    if not python_path.exists():
+        print(f"Creating virtual environment: {VENV_DIR}", flush=True)
+        venv.EnvBuilder(with_pip=True).create(VENV_DIR)
+    return python_path
+
+
+def _install_dependencies(python_path: Path) -> None:
+    _run([python_path, "-m", "pip", "install", "--upgrade", "pip"])
+    _run([python_path, "-m", "pip", "install", "-r", "requirements.txt"])
+
+
+def _download_model() -> None:
+    MODEL_DIR.mkdir(parents=True, exist_ok=True)
+    if MODEL_PATH.exists() and MODEL_PATH.stat().st_size > 0:
+        print(f"Model already present: {MODEL_PATH}", flush=True)
+        return
+
+    tmp_path = MODEL_PATH.with_suffix(".pt.tmp")
+    print(f"Downloading model from Hugging Face:\n{MODEL_URL}", flush=True)
+    with urllib.request.urlopen(MODEL_URL) as response, tmp_path.open("wb") as handle:
+        total = int(response.headers.get("Content-Length") or 0)
+        downloaded = 0
+        while True:
+            chunk = response.read(1024 * 1024)
+            if not chunk:
+                break
+            handle.write(chunk)
+            downloaded += len(chunk)
+            if total:
+                percent = downloaded * 100 / total
+                print(f"\r{downloaded / 1_000_000:.1f} MB / {total / 1_000_000:.1f} MB ({percent:.0f}%)", end="")
+            else:
+                print(f"\r{downloaded / 1_000_000:.1f} MB", end="")
+    print()
+    tmp_path.replace(MODEL_PATH)
+    print(f"Saved model to: {MODEL_PATH}", flush=True)
+
+
+def main() -> None:
+    parser = argparse.ArgumentParser(description="Install and run the Vessel Detection Gradio demo locally.")
+    parser.add_argument("--skip-install", action="store_true", help="Do not install Python dependencies.")
+    parser.add_argument("--download-only", action="store_true", help="Download the model and exit.")
+    parser.add_argument("--host", default="127.0.0.1", help="Gradio server host.")
+    parser.add_argument("--port", default="7860", help="Gradio server port.")
+    args = parser.parse_args()
+
+    python_path = None
+    if not (args.download_only and args.skip_install):
+        python_path = _ensure_venv()
+    if not args.skip_install:
+        if python_path is None:
+            python_path = _ensure_venv()
+        _install_dependencies(python_path)
+    _download_model()
+
+    if args.download_only:
+        return
+
+    if python_path is None:
+        python_path = _ensure_venv()
+    env = os.environ.copy()
+    env["GRADIO_SERVER_NAME"] = args.host
+    env["GRADIO_SERVER_PORT"] = args.port
+    print(f"\nStarting Gradio at http://{args.host}:{args.port}", flush=True)
+    _run([python_path, "app.py"], env=env)
+
+
+if __name__ == "__main__":
+    main()
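As a quick check that the downloaded checkpoint is usable, a small smoke test along these lines can be run after `python run_local.py --download-only`. This is a sketch, not part of the commit: the image path is a placeholder, and `ultralytics` must already be installed (for example via the script's own dependency step).

```python
# Minimal sketch: load the downloaded checkpoint and run one prediction.
# "examples/ship.jpg" is a hypothetical path; substitute any RGB image.
from ultralytics import YOLO

model = YOLO("models/best.pt")
results = model.predict("examples/ship.jpg", conf=0.25)
print(f"Detections: {len(results[0].boxes)}")
```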
start.ps1 ADDED
@@ -0,0 +1,7 @@
+$ErrorActionPreference = "Stop"
+
+if (Get-Command py -ErrorAction SilentlyContinue) {
+    py -3 run_local.py
+} else {
+    python run_local.py
+}
start.sh ADDED
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+if command -v python3 >/dev/null 2>&1; then
+    python3 run_local.py
+else
+    python run_local.py
+fi