github-actions commited on
Commit
856a5e2
·
1 Parent(s): 53f6595

Sync from GitHub to Hugging Face

Browse files
Files changed (21) hide show
  1. space_repo/requirements.txt +2 -0
  2. space_repo/space_repo/space_repo/app.py +98 -37
  3. space_repo/space_repo/space_repo/space_repo/space_repo/app.py +6 -3
  4. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +98 -83
  5. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt +5 -0
  6. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +25 -1
  7. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +14 -13
  8. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +9 -2
  9. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +14 -5
  10. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +14 -1
  11. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt +3 -0
  12. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +27 -113
  13. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +2 -2
  14. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/YOLO.ipynb +0 -0
  15. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/car_classifier.pth +3 -0
  16. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt +11 -1
  17. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/.gitattributes +35 -0
  18. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/README.md +91 -0
  19. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +181 -0
  20. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt +1 -0
  21. yolo_module.py +173 -0
space_repo/requirements.txt CHANGED
@@ -17,3 +17,5 @@ ipython
17
 
18
  seaborn
19
  gitpython
 
 
 
17
 
18
  seaborn
19
  gitpython
20
+
21
+ opencv-python-headless
space_repo/space_repo/space_repo/app.py CHANGED
@@ -23,68 +23,133 @@ with open("YOLO.ipynb", "r", encoding="utf-8") as f:
23
  nb = nbformat.read(f, as_version=4)
24
 
25
  # ---------------------------
26
- # Robust: extract only the essential notebook cells and write yolo_converted.py
27
  # ---------------------------
28
- import nbformat, re, runpy, os, json
29
-
30
- # indices of the important cells found in your YOLO.ipynb
31
- important_cell_indices = [2, 4, 6, 7]
32
 
33
  with open("YOLO.ipynb", "r", encoding="utf-8") as f:
34
  nb_all = nbformat.read(f, as_version=4)
35
 
36
- # safe patterns to remove (Colab magics / upload/demo lines)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  BAD_LINE_PATTERNS = [
38
  r'^\s*!', # shell commands
39
  r'^\s*%', # magics
40
- 'google.colab',
41
- 'files.upload',
42
  r'\buploaded\b',
43
  r'\bimg_path\b',
44
- # note: do NOT ban 'detect_and_classify(' so the function def stays intact
45
  ]
46
 
47
- collected_cells = []
48
- for idx in important_cell_indices:
49
- if idx < 0 or idx >= len(nb_all.cells):
 
 
50
  continue
51
- cell = nb_all.cells[idx]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  if cell.cell_type != "code":
53
  continue
54
- lines = []
55
- for line in cell.source.splitlines():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  if any(re.search(p, line) for p in BAD_LINE_PATTERNS):
57
- # skip Colab-only or demo lines inside the important cell
58
  continue
59
- lines.append(line)
60
- # only append non-empty cell content
61
- if any(l.strip() for l in lines):
62
- collected_cells.append("\n".join(lines))
 
 
 
63
 
64
- # join with explicit separators (makes debugging easier)
65
- safe_code = "\n\n# ---- cell boundary ----\n\n".join(collected_cells)
66
 
67
- # small normalization: remove any top-of-file stray "pass # skipped..." left by earlier attempts
68
- safe_code = re.sub(r'^\s*pass # skipped during conversion\s*', '', safe_code, count=1, flags=re.M)
69
- # ensure top-level code does not start with an indented 'pass'
70
- safe_code = re.sub(r'^[ \t]+pass # skipped during conversion\s*$', 'pass # skipped during conversion\n', safe_code, flags=re.M)
71
-
72
- # Ensure numpy is available (some extracted cells use np)
73
  if not re.search(r'(^|\n)\s*(import numpy\b|import numpy as\b|from numpy\b)', safe_code):
74
  safe_code = "import numpy as np\n\n" + safe_code
75
 
 
 
 
 
 
76
  with open("yolo_converted.py", "w", encoding="utf-8") as fh:
77
  fh.write(safe_code)
78
 
79
- # quick log (first 800 chars) to help debug in HF logs
80
  print("Wrote yolo_converted.py — preview:")
81
- print(safe_code[:800].replace("\n", "\\n"))
 
 
 
82
 
83
- # Try to load it
84
  try:
85
  mod = runpy.run_path("yolo_converted.py")
86
  except Exception as e:
87
- # show snippet to help debugging
88
  head = ""
89
  try:
90
  with open("yolo_converted.py", "r", encoding="utf-8") as fh:
@@ -96,12 +161,8 @@ except Exception as e:
96
  # pull the function
97
  detect_and_classify = mod.get("detect_and_classify")
98
  if not detect_and_classify:
99
- raise RuntimeError("detect_and_classify() not found in the extracted cells. Please ensure cell indices are correct.")
100
  print("✅ detect_and_classify() found and loaded.")
101
- # ---------------------------
102
- # End extraction block
103
- # ---------------------------
104
-
105
 
106
  # --- Load class names (optional, cached to file) ---
107
  try:
 
23
  nb = nbformat.read(f, as_version=4)
24
 
25
  # ---------------------------
26
+ # Robust dynamic extraction from YOLO.ipynb (safer)
27
  # ---------------------------
28
+ import re, runpy
 
 
 
29
 
30
  with open("YOLO.ipynb", "r", encoding="utf-8") as f:
31
  nb_all = nbformat.read(f, as_version=4)
32
 
33
+ # Patterns that indicate a cell is important (we'll include any cell that matches any marker)
34
+ IMPORTANT_MARKERS = [
35
+ r"torch\.hub\.load\(", # yolo loader cell
36
+ r"models\.resnet18", # classifier architecture
37
+ r"model\.load_state_dict", # checkpoint load
38
+ r"transform\(", # transform definition
39
+ r"transforms\.", # torchvision transforms usage
40
+ r"def get_color_name", # color helper
41
+ r"def detect_and_classify", # the pipeline function (must be preserved)
42
+ r"import numpy", # numpy import
43
+ r"import torch", # torch import
44
+ r"from torchvision", # torchvision imports
45
+ r"from PIL import", # PIL imports
46
+ r"import cv2", # optional cv usage
47
+ ]
48
+
49
+ # Lines we will strip from included cells (Colab magics / file upload demos)
50
  BAD_LINE_PATTERNS = [
51
  r'^\s*!', # shell commands
52
  r'^\s*%', # magics
53
+ r'google\.colab',
54
+ r'files.upload',
55
  r'\buploaded\b',
56
  r'\bimg_path\b',
 
57
  ]
58
 
59
+ # Collect unique cells that match any important marker or imports
60
+ selected_cells = []
61
+ seen_indices = set()
62
+ for i, cell in enumerate(nb_all.cells):
63
+ if cell.cell_type != "code":
64
  continue
65
+ src = cell.source or ""
66
+ # include if any important marker matches
67
+ if any(re.search(p, src, flags=re.I) for p in IMPORTANT_MARKERS):
68
+ if i not in seen_indices:
69
+ seen_indices.add(i)
70
+ selected_cells.append((i, src))
71
+
72
+ # If we didn't find the function, as a fallback include any cell that looks like it contains function words
73
+ if not any("def detect_and_classify" in src for (_, src) in selected_cells):
74
+ for i, cell in enumerate(nb_all.cells):
75
+ if cell.cell_type != "code":
76
+ continue
77
+ src = cell.source or ""
78
+ if any(k in src.lower() for k in ["def detect", "def classify", "def predict", "detect_and_classify"]):
79
+ if i not in seen_indices:
80
+ seen_indices.add(i)
81
+ selected_cells.append((i, src))
82
+
83
+ # Also ensure import cells appear first
84
+ import_cells = []
85
+ for i, cell in enumerate(nb_all.cells):
86
  if cell.cell_type != "code":
87
  continue
88
+ src = cell.source or ""
89
+ if re.search(r'^\s*(import |from )', src, flags=re.M):
90
+ if i not in seen_indices:
91
+ import_cells.append((i, src))
92
+ seen_indices.add(i)
93
+
94
+ # Sort selected cells by original order: imports first, then selected_cells by index
95
+ selected_cells_sorted = sorted(import_cells + selected_cells, key=lambda tup: tup[0])
96
+
97
+ # Clean each selected cell, but keep any cell that contains the function header intact.
98
+ cleaned_cells = []
99
+ for idx, src in selected_cells_sorted:
100
+ # If this cell contains the function definition, keep it largely intact
101
+ if "def detect_and_classify" in src:
102
+ lines = []
103
+ for line in src.splitlines():
104
+ # remove only Colab magics / shell commands, but keep everything else
105
+ if re.match(r'^\s*!', line) or re.match(r'^\s*%', line):
106
+ continue
107
+ # keep the def line and body exactly as-is otherwise
108
+ lines.append(line)
109
+ cleaned_src = "\n".join(lines).rstrip()
110
+ if cleaned_src:
111
+ cleaned_cells.append(cleaned_src)
112
+ continue
113
+
114
+ # Otherwise, perform conservative cleaning: remove magics, uploads, and *top-level* test calls
115
+ cleaned_lines = []
116
+ for line in src.splitlines():
117
+ # drop Colab magics / shell commands and file picker demo lines
118
  if any(re.search(p, line) for p in BAD_LINE_PATTERNS):
 
119
  continue
120
+ # drop top-level calls to detect_and_classify() in non-function cells (prevents auto-run)
121
+ if re.search(r'\bdetect_and_classify\s*\(', line):
122
+ continue
123
+ cleaned_lines.append(line)
124
+ cleaned_src = "\n".join(cleaned_lines).rstrip()
125
+ if cleaned_src:
126
+ cleaned_cells.append(cleaned_src)
127
 
128
+ # Join cells into script (imports / helpers first by sorted index)
129
+ safe_code = "\n\n# ---- cell boundary ----\n\n".join(cleaned_cells)
130
 
131
+ # Ensure numpy is available if used
 
 
 
 
 
132
  if not re.search(r'(^|\n)\s*(import numpy\b|import numpy as\b|from numpy\b)', safe_code):
133
  safe_code = "import numpy as np\n\n" + safe_code
134
 
135
+ # Sanitize stray placeholders
136
+ safe_code = re.sub(r'^\s*pass # skipped during conversion\s*', '', safe_code, count=1, flags=re.M)
137
+ safe_code = re.sub(r'^[ \t]+pass # skipped during conversion\s*$', 'pass # skipped during conversion\n', safe_code, flags=re.M)
138
+
139
+ # Write converted file
140
  with open("yolo_converted.py", "w", encoding="utf-8") as fh:
141
  fh.write(safe_code)
142
 
 
143
  print("Wrote yolo_converted.py — preview:")
144
+ print(safe_code[:1000].replace("\n", "\\n"))
145
+ # ---------------------------
146
+ # End dynamic extraction block
147
+ # ---------------------------
148
 
149
+ # --- Try to import and run the converted module ---
150
  try:
151
  mod = runpy.run_path("yolo_converted.py")
152
  except Exception as e:
 
153
  head = ""
154
  try:
155
  with open("yolo_converted.py", "r", encoding="utf-8") as fh:
 
161
  # pull the function
162
  detect_and_classify = mod.get("detect_and_classify")
163
  if not detect_and_classify:
164
+ raise RuntimeError("detect_and_classify() not found in the extracted cells. Please ensure your notebook defines it.")
165
  print("✅ detect_and_classify() found and loaded.")
 
 
 
 
166
 
167
  # --- Load class names (optional, cached to file) ---
168
  try:
space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -24,12 +24,10 @@ with open("YOLO.ipynb", "r", encoding="utf-8") as f:
24
 
25
  # ---------------------------
26
  # Robust: extract only the essential notebook cells and write yolo_converted.py
27
- # (this block was inserted per your request)
28
  # ---------------------------
29
  import nbformat, re, runpy, os, json
30
 
31
  # indices of the important cells found in your YOLO.ipynb
32
- # (these came from inspecting the uploaded notebook)
33
  important_cell_indices = [2, 4, 6, 7]
34
 
35
  with open("YOLO.ipynb", "r", encoding="utf-8") as f:
@@ -43,6 +41,7 @@ BAD_LINE_PATTERNS = [
43
  'files.upload',
44
  r'\buploaded\b',
45
  r'\bimg_path\b',
 
46
  ]
47
 
48
  collected_cells = []
@@ -70,6 +69,10 @@ safe_code = re.sub(r'^\s*pass # skipped during conversion\s*', '', safe_code, c
70
  # ensure top-level code does not start with an indented 'pass'
71
  safe_code = re.sub(r'^[ \t]+pass # skipped during conversion\s*$', 'pass # skipped during conversion\n', safe_code, flags=re.M)
72
 
 
 
 
 
73
  with open("yolo_converted.py", "w", encoding="utf-8") as fh:
74
  fh.write(safe_code)
75
 
@@ -96,7 +99,7 @@ if not detect_and_classify:
96
  raise RuntimeError("detect_and_classify() not found in the extracted cells. Please ensure cell indices are correct.")
97
  print("✅ detect_and_classify() found and loaded.")
98
  # ---------------------------
99
- # End inserted extraction block
100
  # ---------------------------
101
 
102
 
 
24
 
25
  # ---------------------------
26
  # Robust: extract only the essential notebook cells and write yolo_converted.py
 
27
  # ---------------------------
28
  import nbformat, re, runpy, os, json
29
 
30
  # indices of the important cells found in your YOLO.ipynb
 
31
  important_cell_indices = [2, 4, 6, 7]
32
 
33
  with open("YOLO.ipynb", "r", encoding="utf-8") as f:
 
41
  'files.upload',
42
  r'\buploaded\b',
43
  r'\bimg_path\b',
44
+ # note: do NOT ban 'detect_and_classify(' so the function def stays intact
45
  ]
46
 
47
  collected_cells = []
 
69
  # ensure top-level code does not start with an indented 'pass'
70
  safe_code = re.sub(r'^[ \t]+pass # skipped during conversion\s*$', 'pass # skipped during conversion\n', safe_code, flags=re.M)
71
 
72
+ # Ensure numpy is available (some extracted cells use np)
73
+ if not re.search(r'(^|\n)\s*(import numpy\b|import numpy as\b|from numpy\b)', safe_code):
74
+ safe_code = "import numpy as np\n\n" + safe_code
75
+
76
  with open("yolo_converted.py", "w", encoding="utf-8") as fh:
77
  fh.write(safe_code)
78
 
 
99
  raise RuntimeError("detect_and_classify() not found in the extracted cells. Please ensure cell indices are correct.")
100
  print("✅ detect_and_classify() found and loaded.")
101
  # ---------------------------
102
+ # End extraction block
103
  # ---------------------------
104
 
105
 
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -1,103 +1,114 @@
1
- # -*- coding: utf-8 -*-
2
- """GradioUI.ipynb
3
-
4
- Automatically generated by Colab.
5
-
6
- Original file is located at
7
- https://colab.research.google.com/drive/1gTrf304mzjGMheD47oHDhnYTIrEyf4qp
8
- """
9
-
10
  import os
11
- os.system("pip install seaborn --quiet")
 
 
 
 
12
 
 
 
13
 
14
- import json
15
  import torch
16
  import gradio as gr
17
  from PIL import Image
18
- import nbformat
19
- from nbconvert import PythonExporter
20
- import runpy
21
  from datasets import load_dataset
22
 
23
- # --- Convert YOLO notebook to Python ---
24
  if not os.path.exists("YOLO.ipynb"):
25
  raise FileNotFoundError("YOLO.ipynb not found in app directory!")
26
 
27
- # Read YOLO.ipynb
28
- with open("YOLO.ipynb") as f:
29
  nb = nbformat.read(f, as_version=4)
30
 
31
- # Remove or skip Google Colab imports and magic commands (! or %) or google colab file picker
32
- # --- Patch the YOLO notebook code to skip testing lines safely ---
33
- for cell in nb.cells:
34
- if cell.cell_type == "code":
35
- lines = []
36
- for line in cell.source.splitlines():
37
- bad_patterns = [
38
- "!", "%",
39
- "google.colab",
40
- "files.upload",
41
- "uploaded",
42
- "img_path",
43
- "detect_and_classify(",
44
- "print(",
45
- "display("
46
- ]
47
- if any(p in line for p in bad_patterns):
48
- # Keep Python structure valid (avoid empty if-blocks)
49
- lines.append(" pass # skipped during conversion")
50
- continue
51
- lines.append(line)
52
- cell.source = "\n".join(lines)
53
-
54
-
55
- # --- Export cleaned notebook to Python (via nbformat export) ---
56
- py_exporter = PythonExporter()
57
- (code, _) = py_exporter.from_notebook_node(nb)
58
-
59
- # write initial converted file
60
- with open("yolo_converted.py", "w") as f:
61
- f.write(code)
62
-
63
- # --- Post-process the generated file to fix indentation issues from removed lines ---
64
- import re
65
-
66
- with open("yolo_converted.py", "r") as f:
67
- conv_code = f.read()
68
-
69
- # 1) Replace any lines that are only indented 'pass # skipped during conversion'
70
- # with an unindented version so they don't break top-level structure.
71
- conv_code = re.sub(r'^[ \t]+pass # skipped during conversion\s*$', 'pass # skipped during conversion\n', conv_code, flags=re.M)
72
-
73
- # 2) If any 'pass # skipped during conversion' directly follows a top-level statement
74
- # with incorrect indentation, keep them as 'pass' but ensure indentation matches previous block.
75
- # (This is conservative; we only normalize leading whitespace for the placeholder)
76
- # Already handled by the regex above.
77
-
78
- # 3) Remove any leading 'pass # skipped...' at the very top of the file (if present)
79
- conv_code = re.sub(r'^\s*pass # skipped during conversion\s*', '', conv_code, count=1, flags=re.M)
80
-
81
- # Save cleaned code back
82
- with open("yolo_converted.py", "w") as f:
83
- f.write(conv_code)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
- # --- Run the converted YOLO script ---
86
- mod = runpy.run_path("yolo_converted.py")
87
  detect_and_classify = mod.get("detect_and_classify")
88
  if not detect_and_classify:
89
- raise RuntimeError("detect_and_classify() not found in YOLO.ipynb")
 
 
 
 
90
 
91
- print("✅ YOLO pipeline loaded successfully")
92
 
93
- # --- Load class names ---
94
  try:
95
  ds = load_dataset("tanganke/stanford_cars")
96
  class_names = ds["train"].features["label"].names
97
- with open("class_names.json", "w") as f:
98
  json.dump(class_names, f)
 
99
  except Exception as e:
100
- print("Warning: Could not load dataset class names.", e)
101
  class_names = None
102
 
103
  # --- Gradio UI ---
@@ -106,24 +117,28 @@ def gradio_interface(image):
106
  return "Please upload an image."
107
  temp_path = "temp_image.png"
108
  image.save(temp_path)
109
-
110
  try:
111
  results = detect_and_classify(temp_path)
112
  except Exception as e:
113
  return f"❌ Error running YOLO pipeline: {e}"
114
  finally:
115
- os.remove(temp_path)
 
116
 
117
  if not results:
118
  return "No cars detected."
119
 
120
  lines = [f"Cars detected: {len(results)}"]
121
  for i, item in enumerate(results, start=1):
122
- if len(item) == 4:
123
- crop, pred, color, conf = item
124
- else:
125
- crop, pred, color = item
 
126
  conf = None
 
 
 
127
 
128
  if isinstance(pred, int) and class_names and 0 <= pred < len(class_names):
129
  name = class_names[pred]
 
1
+ # app.py - Hugging Face ready
 
 
 
 
 
 
 
 
2
  import os
3
+ import re
4
+ import json
5
+ import runpy
6
+ import nbformat
7
+ from nbconvert import PythonExporter
8
 
9
+ # Prefer dependencies via requirements.txt. Small one-off installs if needed:
10
+ # os.system("pip install seaborn --quiet")
11
 
 
12
  import torch
13
  import gradio as gr
14
  from PIL import Image
 
 
 
15
  from datasets import load_dataset
16
 
17
+ # --- Ensure YOLO notebook exists ---
18
  if not os.path.exists("YOLO.ipynb"):
19
  raise FileNotFoundError("YOLO.ipynb not found in app directory!")
20
 
21
+ # --- Read notebook ---
22
+ with open("YOLO.ipynb", "r", encoding="utf-8") as f:
23
  nb = nbformat.read(f, as_version=4)
24
 
25
+ # ---------------------------
26
+ # Robust: extract only the essential notebook cells and write yolo_converted.py
27
+ # (this block was inserted per your request)
28
+ # ---------------------------
29
+ import nbformat, re, runpy, os, json
30
+
31
+ # indices of the important cells found in your YOLO.ipynb
32
+ # (these came from inspecting the uploaded notebook)
33
+ important_cell_indices = [2, 4, 6, 7]
34
+
35
+ with open("YOLO.ipynb", "r", encoding="utf-8") as f:
36
+ nb_all = nbformat.read(f, as_version=4)
37
+
38
+ # safe patterns to remove (Colab magics / upload/demo lines)
39
+ BAD_LINE_PATTERNS = [
40
+ r'^\s*!', # shell commands
41
+ r'^\s*%', # magics
42
+ 'google.colab',
43
+ 'files.upload',
44
+ r'\buploaded\b',
45
+ r'\bimg_path\b',
46
+ ]
47
+
48
+ collected_cells = []
49
+ for idx in important_cell_indices:
50
+ if idx < 0 or idx >= len(nb_all.cells):
51
+ continue
52
+ cell = nb_all.cells[idx]
53
+ if cell.cell_type != "code":
54
+ continue
55
+ lines = []
56
+ for line in cell.source.splitlines():
57
+ if any(re.search(p, line) for p in BAD_LINE_PATTERNS):
58
+ # skip Colab-only or demo lines inside the important cell
59
+ continue
60
+ lines.append(line)
61
+ # only append non-empty cell content
62
+ if any(l.strip() for l in lines):
63
+ collected_cells.append("\n".join(lines))
64
+
65
+ # join with explicit separators (makes debugging easier)
66
+ safe_code = "\n\n# ---- cell boundary ----\n\n".join(collected_cells)
67
+
68
+ # small normalization: remove any top-of-file stray "pass # skipped..." left by earlier attempts
69
+ safe_code = re.sub(r'^\s*pass # skipped during conversion\s*', '', safe_code, count=1, flags=re.M)
70
+ # ensure top-level code does not start with an indented 'pass'
71
+ safe_code = re.sub(r'^[ \t]+pass # skipped during conversion\s*$', 'pass # skipped during conversion\n', safe_code, flags=re.M)
72
+
73
+ with open("yolo_converted.py", "w", encoding="utf-8") as fh:
74
+ fh.write(safe_code)
75
+
76
+ # quick log (first 800 chars) to help debug in HF logs
77
+ print("Wrote yolo_converted.py — preview:")
78
+ print(safe_code[:800].replace("\n", "\\n"))
79
+
80
+ # Try to load it
81
+ try:
82
+ mod = runpy.run_path("yolo_converted.py")
83
+ except Exception as e:
84
+ # show snippet to help debugging
85
+ head = ""
86
+ try:
87
+ with open("yolo_converted.py", "r", encoding="utf-8") as fh:
88
+ head = fh.read(2000)
89
+ except Exception:
90
+ head = "<could not read yolo_converted.py>"
91
+ raise RuntimeError(f"Failed to run yolo_converted.py: {e}\n--- head of converted file ---\n{head}")
92
 
93
+ # pull the function
 
94
  detect_and_classify = mod.get("detect_and_classify")
95
  if not detect_and_classify:
96
+ raise RuntimeError("detect_and_classify() not found in the extracted cells. Please ensure cell indices are correct.")
97
+ print("✅ detect_and_classify() found and loaded.")
98
+ # ---------------------------
99
+ # End inserted extraction block
100
+ # ---------------------------
101
 
 
102
 
103
+ # --- Load class names (optional, cached to file) ---
104
  try:
105
  ds = load_dataset("tanganke/stanford_cars")
106
  class_names = ds["train"].features["label"].names
107
+ with open("class_names.json", "w", encoding="utf-8") as f:
108
  json.dump(class_names, f)
109
+ print(f"✅ Loaded {len(class_names)} class names")
110
  except Exception as e:
111
+ print("⚠️ Could not load dataset class names:", e)
112
  class_names = None
113
 
114
  # --- Gradio UI ---
 
117
  return "Please upload an image."
118
  temp_path = "temp_image.png"
119
  image.save(temp_path)
 
120
  try:
121
  results = detect_and_classify(temp_path)
122
  except Exception as e:
123
  return f"❌ Error running YOLO pipeline: {e}"
124
  finally:
125
+ if os.path.exists(temp_path):
126
+ os.remove(temp_path)
127
 
128
  if not results:
129
  return "No cars detected."
130
 
131
  lines = [f"Cars detected: {len(results)}"]
132
  for i, item in enumerate(results, start=1):
133
+ # item may be (crop, pred_idx, color) or (crop, pred_idx, color, conf)
134
+ if isinstance(item, (list, tuple)) and len(item) == 4:
135
+ _, pred, color, conf = item
136
+ elif isinstance(item, (list, tuple)) and len(item) >= 3:
137
+ _, pred, color = item[:3]
138
  conf = None
139
+ else:
140
+ lines.append(f"Car {i}: {item}")
141
+ continue
142
 
143
  if isinstance(pred, int) and class_names and 0 <= pred < len(class_names):
144
  name = class_names[pred]
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt CHANGED
@@ -12,3 +12,8 @@ datasets
12
 
13
  nbformat
14
  nbconvert
 
 
 
 
 
 
12
 
13
  nbformat
14
  nbconvert
15
+
16
+ ipython
17
+
18
+ seaborn
19
+ gitpython
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -52,12 +52,36 @@ for cell in nb.cells:
52
  cell.source = "\n".join(lines)
53
 
54
 
55
- # --- Export cleaned notebook to Python ---
56
  py_exporter = PythonExporter()
57
  (code, _) = py_exporter.from_notebook_node(nb)
 
 
58
  with open("yolo_converted.py", "w") as f:
59
  f.write(code)
60
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  # --- Run the converted YOLO script ---
62
  mod = runpy.run_path("yolo_converted.py")
63
  detect_and_classify = mod.get("detect_and_classify")
 
52
  cell.source = "\n".join(lines)
53
 
54
 
55
+ # --- Export cleaned notebook to Python (via nbformat export) ---
56
  py_exporter = PythonExporter()
57
  (code, _) = py_exporter.from_notebook_node(nb)
58
+
59
+ # write initial converted file
60
  with open("yolo_converted.py", "w") as f:
61
  f.write(code)
62
 
63
+ # --- Post-process the generated file to fix indentation issues from removed lines ---
64
+ import re
65
+
66
+ with open("yolo_converted.py", "r") as f:
67
+ conv_code = f.read()
68
+
69
+ # 1) Replace any lines that are only indented 'pass # skipped during conversion'
70
+ # with an unindented version so they don't break top-level structure.
71
+ conv_code = re.sub(r'^[ \t]+pass # skipped during conversion\s*$', 'pass # skipped during conversion\n', conv_code, flags=re.M)
72
+
73
+ # 2) If any 'pass # skipped during conversion' directly follows a top-level statement
74
+ # with incorrect indentation, keep them as 'pass' but ensure indentation matches previous block.
75
+ # (This is conservative; we only normalize leading whitespace for the placeholder)
76
+ # Already handled by the regex above.
77
+
78
+ # 3) Remove any leading 'pass # skipped...' at the very top of the file (if present)
79
+ conv_code = re.sub(r'^\s*pass # skipped during conversion\s*', '', conv_code, count=1, flags=re.M)
80
+
81
+ # Save cleaned code back
82
+ with open("yolo_converted.py", "w") as f:
83
+ f.write(conv_code)
84
+
85
  # --- Run the converted YOLO script ---
86
  mod = runpy.run_path("yolo_converted.py")
87
  detect_and_classify = mod.get("detect_and_classify")
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -29,28 +29,29 @@ with open("YOLO.ipynb") as f:
29
  nb = nbformat.read(f, as_version=4)
30
 
31
  # Remove or skip Google Colab imports and magic commands (! or %) or google colab file picker
32
- # --- Patch the YOLO notebook code to skip testing lines ---
33
  for cell in nb.cells:
34
  if cell.cell_type == "code":
35
  lines = []
36
  for line in cell.source.splitlines():
37
- if (
38
- line.strip().startswith("!") or
39
- line.strip().startswith("%") or
40
- "google.colab" in line or
41
- "files.upload" in line or
42
- "uploaded" in line or
43
- "img_path" in line or
44
- "detect_and_classify(" in line or # skip auto test calls
45
- "print(" in line or # skip print-only outputs
46
- "display(" in line # skip Jupyter displays
47
- ):
 
 
48
  continue
49
  lines.append(line)
50
  cell.source = "\n".join(lines)
51
 
52
 
53
-
54
  # --- Export cleaned notebook to Python ---
55
  py_exporter = PythonExporter()
56
  (code, _) = py_exporter.from_notebook_node(nb)
 
29
  nb = nbformat.read(f, as_version=4)
30
 
31
  # Remove or skip Google Colab imports and magic commands (! or %) or google colab file picker
32
+ # --- Patch the YOLO notebook code to skip testing lines safely ---
33
  for cell in nb.cells:
34
  if cell.cell_type == "code":
35
  lines = []
36
  for line in cell.source.splitlines():
37
+ bad_patterns = [
38
+ "!", "%",
39
+ "google.colab",
40
+ "files.upload",
41
+ "uploaded",
42
+ "img_path",
43
+ "detect_and_classify(",
44
+ "print(",
45
+ "display("
46
+ ]
47
+ if any(p in line for p in bad_patterns):
48
+ # Keep Python structure valid (avoid empty if-blocks)
49
+ lines.append(" pass # skipped during conversion")
50
  continue
51
  lines.append(line)
52
  cell.source = "\n".join(lines)
53
 
54
 
 
55
  # --- Export cleaned notebook to Python ---
56
  py_exporter = PythonExporter()
57
  (code, _) = py_exporter.from_notebook_node(nb)
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -29,21 +29,28 @@ with open("YOLO.ipynb") as f:
29
  nb = nbformat.read(f, as_version=4)
30
 
31
  # Remove or skip Google Colab imports and magic commands (! or %) or google colab file picker
 
32
  for cell in nb.cells:
33
  if cell.cell_type == "code":
34
  lines = []
35
  for line in cell.source.splitlines():
36
- if (
37
  line.strip().startswith("!") or
38
  line.strip().startswith("%") or
39
  "google.colab" in line or
40
- "files.upload" in line
 
 
 
 
 
41
  ):
42
  continue
43
  lines.append(line)
44
  cell.source = "\n".join(lines)
45
 
46
 
 
47
  # --- Export cleaned notebook to Python ---
48
  py_exporter = PythonExporter()
49
  (code, _) = py_exporter.from_notebook_node(nb)
 
29
  nb = nbformat.read(f, as_version=4)
30
 
31
  # Remove or skip Google Colab imports and magic commands (! or %) or google colab file picker
32
+ # --- Patch the YOLO notebook code to skip testing lines ---
33
  for cell in nb.cells:
34
  if cell.cell_type == "code":
35
  lines = []
36
  for line in cell.source.splitlines():
37
+ if (
38
  line.strip().startswith("!") or
39
  line.strip().startswith("%") or
40
  "google.colab" in line or
41
+ "files.upload" in line or
42
+ "uploaded" in line or
43
+ "img_path" in line or
44
+ "detect_and_classify(" in line or # skip auto test calls
45
+ "print(" in line or # skip print-only outputs
46
+ "display(" in line # skip Jupyter displays
47
  ):
48
  continue
49
  lines.append(line)
50
  cell.source = "\n".join(lines)
51
 
52
 
53
+
54
  # --- Export cleaned notebook to Python ---
55
  py_exporter = PythonExporter()
56
  (code, _) = py_exporter.from_notebook_node(nb)
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -8,6 +8,9 @@ Original file is located at
8
  """
9
 
10
  import os
 
 
 
11
  import json
12
  import torch
13
  import gradio as gr
@@ -25,15 +28,21 @@ if not os.path.exists("YOLO.ipynb"):
25
  with open("YOLO.ipynb") as f:
26
  nb = nbformat.read(f, as_version=4)
27
 
28
- # --- Clean notebook magic commands (!pip, !git, %cd, etc.) ---
29
  for cell in nb.cells:
30
  if cell.cell_type == "code":
31
- cleaned_lines = []
32
  for line in cell.source.splitlines():
33
- if line.strip().startswith(("!", "%")):
 
 
 
 
 
34
  continue
35
- cleaned_lines.append(line)
36
- cell.source = "\n".join(cleaned_lines)
 
37
 
38
  # --- Export cleaned notebook to Python ---
39
  py_exporter = PythonExporter()
 
8
  """
9
 
10
  import os
11
+ os.system("pip install seaborn --quiet")
12
+
13
+
14
  import json
15
  import torch
16
  import gradio as gr
 
28
  with open("YOLO.ipynb") as f:
29
  nb = nbformat.read(f, as_version=4)
30
 
31
+ # Remove or skip Google Colab imports and magic commands (! or %) or google colab file picker
32
  for cell in nb.cells:
33
  if cell.cell_type == "code":
34
+ lines = []
35
  for line in cell.source.splitlines():
36
+ if (
37
+ line.strip().startswith("!") or
38
+ line.strip().startswith("%") or
39
+ "google.colab" in line or
40
+ "files.upload" in line
41
+ ):
42
  continue
43
+ lines.append(line)
44
+ cell.source = "\n".join(lines)
45
+
46
 
47
  # --- Export cleaned notebook to Python ---
48
  py_exporter = PythonExporter()
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -21,14 +21,27 @@ from datasets import load_dataset
21
  if not os.path.exists("YOLO.ipynb"):
22
  raise FileNotFoundError("YOLO.ipynb not found in app directory!")
23
 
24
- # Convert YOLO.ipynb → yolo_converted.py
25
  with open("YOLO.ipynb") as f:
26
  nb = nbformat.read(f, as_version=4)
 
 
 
 
 
 
 
 
 
 
 
 
27
  py_exporter = PythonExporter()
28
  (code, _) = py_exporter.from_notebook_node(nb)
29
  with open("yolo_converted.py", "w") as f:
30
  f.write(code)
31
 
 
32
  mod = runpy.run_path("yolo_converted.py")
33
  detect_and_classify = mod.get("detect_and_classify")
34
  if not detect_and_classify:
 
21
  if not os.path.exists("YOLO.ipynb"):
22
  raise FileNotFoundError("YOLO.ipynb not found in app directory!")
23
 
24
+ # Read YOLO.ipynb
25
  with open("YOLO.ipynb") as f:
26
  nb = nbformat.read(f, as_version=4)
27
+
28
+ # --- Clean notebook magic commands (!pip, !git, %cd, etc.) ---
29
+ for cell in nb.cells:
30
+ if cell.cell_type == "code":
31
+ cleaned_lines = []
32
+ for line in cell.source.splitlines():
33
+ if line.strip().startswith(("!", "%")):
34
+ continue
35
+ cleaned_lines.append(line)
36
+ cell.source = "\n".join(cleaned_lines)
37
+
38
+ # --- Export cleaned notebook to Python ---
39
  py_exporter = PythonExporter()
40
  (code, _) = py_exporter.from_notebook_node(nb)
41
  with open("yolo_converted.py", "w") as f:
42
  f.write(code)
43
 
44
+ # --- Run the converted YOLO script ---
45
  mod = runpy.run_path("yolo_converted.py")
46
  detect_and_classify = mod.get("detect_and_classify")
47
  if not detect_and_classify:
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt CHANGED
@@ -9,3 +9,6 @@ opencv-python
9
  timm
10
  transformers
11
  datasets
 
 
 
 
9
  timm
10
  transformers
11
  datasets
12
+
13
+ nbformat
14
+ nbconvert
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -7,146 +7,75 @@ Original file is located at
7
  https://colab.research.google.com/drive/1gTrf304mzjGMheD47oHDhnYTIrEyf4qp
8
  """
9
 
 
 
 
10
  import gradio as gr
11
  from PIL import Image
12
- import torch
13
- import os
14
-
15
- import os
16
- from google.colab import files
17
-
18
- if not os.path.exists('YOLO.ipynb'):
19
- print("Please upload YOLO.ipynb (the script exported from your YOLO notebook).")
20
- uploaded = files.upload() # upload YOLO.ipynb
21
- print("Uploaded:", list(uploaded.keys()))
22
- else:
23
- print("YOLO.ipynb already present.")
24
-
25
- !ls /content
26
-
27
  import nbformat
28
  from nbconvert import PythonExporter
29
  import runpy
 
30
 
31
- # Convert YOLO.ipynb to a .py script dynamically
 
 
 
 
32
  with open("YOLO.ipynb") as f:
33
  nb = nbformat.read(f, as_version=4)
34
-
35
- # Find the cell that loads car_classifier.pth and modify the path
36
- # Also, remove any code that tries to open the notebook file as an image
37
- modified_cells = []
38
- for cell in nb.cells:
39
- if cell.cell_type == 'code':
40
- # This is a heuristic: look for lines containing 'car_classifier.pth'
41
- if 'car_classifier.pth' in cell.source:
42
- cell.source = cell.source.replace("'car_classifier.pth'", "'/content/car_classifier.pth'")
43
- cell.source = cell.source.replace('"car_classifier.pth"', '"/content/car_classifier.pth"')
44
-
45
- # Heuristic to remove code that might try to open the notebook as an image
46
- if 'Image.open(' in cell.source and 'YOLO.ipynb' in cell.source:
47
- cell.source = '# Removed potential image loading of notebook file:\n#' + cell.source
48
-
49
  py_exporter = PythonExporter()
50
  (code, _) = py_exporter.from_notebook_node(nb)
51
-
52
- # Save temporarily as script
53
  with open("yolo_converted.py", "w") as f:
54
  f.write(code)
55
 
56
- # Now safely import detect_and_classify() from that converted script
57
  mod = runpy.run_path("yolo_converted.py")
58
  detect_and_classify = mod.get("detect_and_classify")
59
-
60
  if not detect_and_classify:
61
- raise RuntimeError("Function detect_and_classify not found in YOLO.ipynb")
62
 
63
- print("✅ YOLO function imported successfully")
64
 
65
- import torch, os, json
66
- pth = "/content/car_classifier.pth"
67
- print("Exists:", os.path.exists(pth))
68
- ckpt = torch.load(pth, map_location="cpu")
69
- print("Type:", type(ckpt))
70
-
71
- if isinstance(ckpt, dict):
72
- keys = list(ckpt.keys())
73
- print("Checkpoint keys (first 20):", keys[:20])
74
- # If it's a pure state_dict, it will look like parameter names (e.g. 'conv1.weight')
75
- # If it's a wrapped checkpoint, it may contain 'model_state_dict' or 'class_names'
76
- else:
77
- print("Checkpoint is not a dict; it's probably a raw model object.")
78
-
79
- !pip install -q datasets
80
-
81
- from datasets import load_dataset
82
- ds = load_dataset("tanganke/stanford_cars")
83
- # HF dataset provides label names in the train feature
84
- class_names = ds["train"].features["label"].names
85
- print("Loaded", len(class_names), "class names. Sample:", class_names[:10])
86
-
87
- # Save to disk for reuse
88
- import json
89
- with open("class_names.json", "w") as f:
90
- json.dump(class_names, f, indent=2)
91
- print("Saved class_names.json")
92
-
93
- import json, os
94
- if os.path.exists("class_names.json"):
95
- with open("class_names.json") as f:
96
- class_names = json.load(f)
97
- print("Loaded class_names from file, len =", len(class_names))
98
- else:
99
- print("class_names.json not found; run the HF cell above.")
100
-
101
- import gradio as gr
102
- import os
103
-
104
- # ensure class_names exists in the notebook (from previous cell)
105
  try:
106
- assert class_names is not None and len(class_names) > 0
107
- print("Using class_names with", len(class_names), "entries")
108
- except Exception:
 
 
 
109
  class_names = None
110
- print("class_names not available; will show numeric labels")
111
 
112
- def gradio_interface(image, *args, **kwargs):
 
113
  if image is None:
114
  return "Please upload an image."
115
-
116
  temp_path = "temp_image.png"
117
  image.save(temp_path)
118
 
119
  try:
120
- results = detect_and_classify(temp_path) # your notebook function
121
  except Exception as e:
122
  return f"❌ Error running YOLO pipeline: {e}"
123
  finally:
124
- if os.path.exists(temp_path):
125
- os.remove(temp_path)
126
 
127
  if not results:
128
  return "No cars detected."
129
 
130
  lines = [f"Cars detected: {len(results)}"]
131
-
132
  for i, item in enumerate(results, start=1):
133
- # handle both 3-tuple and 4-tuple safely
134
  if len(item) == 4:
135
  crop, pred, color, conf = item
136
  else:
137
  crop, pred, color = item
138
  conf = None
139
 
140
- # map pred -> human name if possible
141
- if isinstance(pred, int):
142
- if class_names and 0 <= pred < len(class_names):
143
- name = class_names[pred]
144
- else:
145
- name = f"Class {pred}"
146
  else:
147
  name = str(pred)
148
 
149
- # Format with confidence if available
150
  if conf is not None:
151
  lines.append(f"Car {i}: {color} {name} ({conf*100:.1f}% confident)")
152
  else:
@@ -154,28 +83,13 @@ def gradio_interface(image, *args, **kwargs):
154
 
155
  return "\n".join(lines)
156
 
157
- # Launch Gradio Interface
158
  iface = gr.Interface(
159
  fn=gradio_interface,
160
  inputs=gr.Image(type="pil", label="Upload an Image"),
161
  outputs=gr.Textbox(label="Detection & Classification Results"),
162
  title="Car Detector + Classifier (YOLO)",
163
- description="Upload a car image and get its color, model, and confidence score.",
164
  )
165
- iface.launch(share=True)
166
-
167
- # Test the gradio_interface function with the venza.jpg image
168
- image_path = "/content/venza.jpg"
169
- try:
170
- # Open the image file
171
- image = Image.open(image_path)
172
- # Call the gradio_interface function, passing class_names
173
- test_output = gradio_interface(image, class_names)
174
- # Print the output
175
- print(test_output)
176
- except FileNotFoundError:
177
- print(f"Error: Image file not found at {image_path}")
178
- except Exception as e:
179
- print(f"An error occurred: {e}")
180
 
181
- !grep -n "results" YOLO.ipynb
 
 
7
  https://colab.research.google.com/drive/1gTrf304mzjGMheD47oHDhnYTIrEyf4qp
8
  """
9
 
10
+ import os
11
+ import json
12
+ import torch
13
  import gradio as gr
14
  from PIL import Image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  import nbformat
16
  from nbconvert import PythonExporter
17
  import runpy
18
+ from datasets import load_dataset
19
 
20
+ # --- Convert YOLO notebook to Python ---
21
+ if not os.path.exists("YOLO.ipynb"):
22
+ raise FileNotFoundError("YOLO.ipynb not found in app directory!")
23
+
24
+ # Convert YOLO.ipynb → yolo_converted.py
25
  with open("YOLO.ipynb") as f:
26
  nb = nbformat.read(f, as_version=4)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  py_exporter = PythonExporter()
28
  (code, _) = py_exporter.from_notebook_node(nb)
 
 
29
  with open("yolo_converted.py", "w") as f:
30
  f.write(code)
31
 
 
32
  mod = runpy.run_path("yolo_converted.py")
33
  detect_and_classify = mod.get("detect_and_classify")
 
34
  if not detect_and_classify:
35
+ raise RuntimeError("detect_and_classify() not found in YOLO.ipynb")
36
 
37
+ print("✅ YOLO pipeline loaded successfully")
38
 
39
+ # --- Load class names ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  try:
41
+ ds = load_dataset("tanganke/stanford_cars")
42
+ class_names = ds["train"].features["label"].names
43
+ with open("class_names.json", "w") as f:
44
+ json.dump(class_names, f)
45
+ except Exception as e:
46
+ print("Warning: Could not load dataset class names.", e)
47
  class_names = None
 
48
 
49
+ # --- Gradio UI ---
50
+ def gradio_interface(image):
51
  if image is None:
52
  return "Please upload an image."
 
53
  temp_path = "temp_image.png"
54
  image.save(temp_path)
55
 
56
  try:
57
+ results = detect_and_classify(temp_path)
58
  except Exception as e:
59
  return f"❌ Error running YOLO pipeline: {e}"
60
  finally:
61
+ os.remove(temp_path)
 
62
 
63
  if not results:
64
  return "No cars detected."
65
 
66
  lines = [f"Cars detected: {len(results)}"]
 
67
  for i, item in enumerate(results, start=1):
 
68
  if len(item) == 4:
69
  crop, pred, color, conf = item
70
  else:
71
  crop, pred, color = item
72
  conf = None
73
 
74
+ if isinstance(pred, int) and class_names and 0 <= pred < len(class_names):
75
+ name = class_names[pred]
 
 
 
 
76
  else:
77
  name = str(pred)
78
 
 
79
  if conf is not None:
80
  lines.append(f"Car {i}: {color} {name} ({conf*100:.1f}% confident)")
81
  else:
 
83
 
84
  return "\n".join(lines)
85
 
 
86
  iface = gr.Interface(
87
  fn=gradio_interface,
88
  inputs=gr.Image(type="pil", label="Upload an Image"),
89
  outputs=gr.Textbox(label="Detection & Classification Results"),
90
  title="Car Detector + Classifier (YOLO)",
91
+ description="Upload a car image and get its color, model, and confidence score."
92
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
 
94
+ if __name__ == "__main__":
95
+ iface.launch()
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -7,9 +7,9 @@ Original file is located at
7
  https://colab.research.google.com/drive/1gTrf304mzjGMheD47oHDhnYTIrEyf4qp
8
  """
9
 
10
- !pip install gradio --quiet
11
  import gradio as gr
12
  from PIL import Image
 
13
  import os
14
 
15
  import os
@@ -178,4 +178,4 @@ except FileNotFoundError:
178
  except Exception as e:
179
  print(f"An error occurred: {e}")
180
 
181
- !grep -n "results" YOLO.ipynb
 
7
  https://colab.research.google.com/drive/1gTrf304mzjGMheD47oHDhnYTIrEyf4qp
8
  """
9
 
 
10
  import gradio as gr
11
  from PIL import Image
12
+ import torch
13
  import os
14
 
15
  import os
 
178
  except Exception as e:
179
  print(f"An error occurred: {e}")
180
 
181
+ !grep -n "results" YOLO.ipynb
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/YOLO.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/car_classifier.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df2189a3b9547272dd7a962f5d05e15a0155c57f7b1e6fee41fb4e698d32666e
3
+ size 45188363
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt CHANGED
@@ -1 +1,11 @@
1
- trackio<1.0
 
 
 
 
 
 
 
 
 
 
 
1
+ torch
2
+ torchvision
3
+ ultralytics
4
+ gradio
5
+ pillow
6
+ numpy
7
+ matplotlib
8
+ opencv-python
9
+ timm
10
+ transformers
11
+ datasets
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/README.md ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Car Classifier Model
3
+ emoji: 🚗
4
+ colorFrom: blue
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: "4.0.0"
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ # AiModelCarClassifier
13
+ Creating and Running a Car Classifier Model...
14
+
15
+ ## Car Detector(YOLO + Custom Model)
16
+
17
+ This project uses **YOLOv5** for car detection and a **custom-trained classifier** for car model recognition and color identification.
18
+ It takes in any image (JPEG/PNG), detects cars, classifies the car make & model, and outputs color and confidence scores.
19
+
20
+ Example output:
21
+ - **Cars detected: 1**
22
+ - **Car 1: Gray/Silver Dodge Dakota Crew Cab 2010 (98.7% confident)**
23
+
24
+ ---
25
+
26
+ ## Overview
27
+
28
+ This project combines **object detection** and **image classification** in one simple pipeline:
29
+
30
+ 1. **YOLOv5** detects cars in the image.
31
+ 2. The detected car regions are cropped and passed into a **PyTorch classifier** (`car_classifier.pth`).
32
+ 3. A small color recognition helper determines the car’s dominant color.
33
+ 4. Results are displayed through a simple **Gradio UI** (or any frontend, e.g. HTML + Flask).
34
+
35
+ ---
36
+
37
+ ## Project Structure
38
+
39
+
40
+ ├── YOLO.ipynb # Main notebook for YOLO + classification logic
41
+ ├── car_classifier.pth # Trained PyTorch model for car model recognition
42
+ ├── app.py # Gradio (or Flask) app for running the interface
43
+ ├── class_names.json # (Optional) Human-readable class labels
44
+ ├── requirements.txt # Python dependencies
45
+ └── README.md # Project description
46
+
47
+ ---
48
+
49
+ ---
50
+
51
+ ## Works steps
52
+
53
+ 1. **Image Upload** → User uploads an image.
54
+ 2. **YOLOv5 Detection** → Detects car bounding boxes.
55
+ 3. **Classification** → Each car crop is classified using `car_classifier.pth`.
56
+ 4. **Color Recognition** → Extracts car color from the cropped region.
57
+ 5. **Output** → Displays model name, color, and confidence percentage.
58
+
59
+ ---
60
+ ## Model Details
61
+
62
+ - **YOLOv5**: Handles object detection (pretrained on COCO dataset).
63
+ - **Car Classifier (`car_classifier.pth`)**: Fine-tuned model trained on [Stanford Cars Dataset](https://www.kaggle.com/datasets/jessicali9530/stanford-cars).
64
+ - **Color Extractor**: Uses average RGB values to estimate color.
65
+
66
+ ---
67
+ ## install depencies
68
+ ```
69
+ pip install -r requirements.txt
70
+ ```
71
+
72
+
73
+ Then open the Gradio or local web interface that appears in your console.
74
+ ---
75
+
76
+ ## Setup & Run
77
+
78
+ Clone the repo:
79
+
80
+ Then open the Gradio or local web interface that appears in your console.
81
+ ```bash
82
+ https://github.com/<Your-Username>/AiModelCarClassifier.git
83
+ cd car-detector-classifier
84
+ ```
85
+
86
+
87
+
88
+ ## run the app
89
+ ```
90
+ python app.py
91
+ ```
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """GradioUI.ipynb
3
+
4
+ Automatically generated by Colab.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1gTrf304mzjGMheD47oHDhnYTIrEyf4qp
8
+ """
9
+
10
+ !pip install gradio --quiet
11
+ import gradio as gr
12
+ from PIL import Image
13
+ import os
14
+
15
+ import os
16
+ from google.colab import files
17
+
18
+ if not os.path.exists('YOLO.ipynb'):
19
+ print("Please upload YOLO.ipynb (the script exported from your YOLO notebook).")
20
+ uploaded = files.upload() # upload YOLO.ipynb
21
+ print("Uploaded:", list(uploaded.keys()))
22
+ else:
23
+ print("YOLO.ipynb already present.")
24
+
25
+ !ls /content
26
+
27
+ import nbformat
28
+ from nbconvert import PythonExporter
29
+ import runpy
30
+
31
+ # Convert YOLO.ipynb to a .py script dynamically
32
+ with open("YOLO.ipynb") as f:
33
+ nb = nbformat.read(f, as_version=4)
34
+
35
+ # Find the cell that loads car_classifier.pth and modify the path
36
+ # Also, remove any code that tries to open the notebook file as an image
37
+ modified_cells = []
38
+ for cell in nb.cells:
39
+ if cell.cell_type == 'code':
40
+ # This is a heuristic: look for lines containing 'car_classifier.pth'
41
+ if 'car_classifier.pth' in cell.source:
42
+ cell.source = cell.source.replace("'car_classifier.pth'", "'/content/car_classifier.pth'")
43
+ cell.source = cell.source.replace('"car_classifier.pth"', '"/content/car_classifier.pth"')
44
+
45
+ # Heuristic to remove code that might try to open the notebook as an image
46
+ if 'Image.open(' in cell.source and 'YOLO.ipynb' in cell.source:
47
+ cell.source = '# Removed potential image loading of notebook file:\n#' + cell.source
48
+
49
+ py_exporter = PythonExporter()
50
+ (code, _) = py_exporter.from_notebook_node(nb)
51
+
52
+ # Save temporarily as script
53
+ with open("yolo_converted.py", "w") as f:
54
+ f.write(code)
55
+
56
+ # Now safely import detect_and_classify() from that converted script
57
+ mod = runpy.run_path("yolo_converted.py")
58
+ detect_and_classify = mod.get("detect_and_classify")
59
+
60
+ if not detect_and_classify:
61
+ raise RuntimeError("Function detect_and_classify not found in YOLO.ipynb")
62
+
63
+ print("✅ YOLO function imported successfully")
64
+
65
+ import torch, os, json
66
+ pth = "/content/car_classifier.pth"
67
+ print("Exists:", os.path.exists(pth))
68
+ ckpt = torch.load(pth, map_location="cpu")
69
+ print("Type:", type(ckpt))
70
+
71
+ if isinstance(ckpt, dict):
72
+ keys = list(ckpt.keys())
73
+ print("Checkpoint keys (first 20):", keys[:20])
74
+ # If it's a pure state_dict, it will look like parameter names (e.g. 'conv1.weight')
75
+ # If it's a wrapped checkpoint, it may contain 'model_state_dict' or 'class_names'
76
+ else:
77
+ print("Checkpoint is not a dict; it's probably a raw model object.")
78
+
79
+ !pip install -q datasets
80
+
81
+ from datasets import load_dataset
82
+ ds = load_dataset("tanganke/stanford_cars")
83
+ # HF dataset provides label names in the train feature
84
+ class_names = ds["train"].features["label"].names
85
+ print("Loaded", len(class_names), "class names. Sample:", class_names[:10])
86
+
87
+ # Save to disk for reuse
88
+ import json
89
+ with open("class_names.json", "w") as f:
90
+ json.dump(class_names, f, indent=2)
91
+ print("Saved class_names.json")
92
+
93
+ import json, os
94
+ if os.path.exists("class_names.json"):
95
+ with open("class_names.json") as f:
96
+ class_names = json.load(f)
97
+ print("Loaded class_names from file, len =", len(class_names))
98
+ else:
99
+ print("class_names.json not found; run the HF cell above.")
100
+
101
+ import gradio as gr
102
+ import os
103
+
104
+ # ensure class_names exists in the notebook (from previous cell)
105
+ try:
106
+ assert class_names is not None and len(class_names) > 0
107
+ print("Using class_names with", len(class_names), "entries")
108
+ except Exception:
109
+ class_names = None
110
+ print("class_names not available; will show numeric labels")
111
+
112
+ def gradio_interface(image, *args, **kwargs):
113
+ if image is None:
114
+ return "Please upload an image."
115
+
116
+ temp_path = "temp_image.png"
117
+ image.save(temp_path)
118
+
119
+ try:
120
+ results = detect_and_classify(temp_path) # your notebook function
121
+ except Exception as e:
122
+ return f"❌ Error running YOLO pipeline: {e}"
123
+ finally:
124
+ if os.path.exists(temp_path):
125
+ os.remove(temp_path)
126
+
127
+ if not results:
128
+ return "No cars detected."
129
+
130
+ lines = [f"Cars detected: {len(results)}"]
131
+
132
+ for i, item in enumerate(results, start=1):
133
+ # handle both 3-tuple and 4-tuple safely
134
+ if len(item) == 4:
135
+ crop, pred, color, conf = item
136
+ else:
137
+ crop, pred, color = item
138
+ conf = None
139
+
140
+ # map pred -> human name if possible
141
+ if isinstance(pred, int):
142
+ if class_names and 0 <= pred < len(class_names):
143
+ name = class_names[pred]
144
+ else:
145
+ name = f"Class {pred}"
146
+ else:
147
+ name = str(pred)
148
+
149
+ # Format with confidence if available
150
+ if conf is not None:
151
+ lines.append(f"Car {i}: {color} {name} ({conf*100:.1f}% confident)")
152
+ else:
153
+ lines.append(f"Car {i}: {color} {name}")
154
+
155
+ return "\n".join(lines)
156
+
157
+ # Launch Gradio Interface
158
+ iface = gr.Interface(
159
+ fn=gradio_interface,
160
+ inputs=gr.Image(type="pil", label="Upload an Image"),
161
+ outputs=gr.Textbox(label="Detection & Classification Results"),
162
+ title="Car Detector + Classifier (YOLO)",
163
+ description="Upload a car image and get its color, model, and confidence score.",
164
+ )
165
+ iface.launch(share=True)
166
+
167
+ # Test the gradio_interface function with the venza.jpg image
168
+ image_path = "/content/venza.jpg"
169
+ try:
170
+ # Open the image file
171
+ image = Image.open(image_path)
172
+ # Call the gradio_interface function, passing class_names
173
+ test_output = gradio_interface(image, class_names)
174
+ # Print the output
175
+ print(test_output)
176
+ except FileNotFoundError:
177
+ print(f"Error: Image file not found at {image_path}")
178
+ except Exception as e:
179
+ print(f"An error occurred: {e}")
180
+
181
+ !grep -n "results" YOLO.ipynb
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ trackio<1.0
yolo_module.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # yolo_module.py
2
+ # A small, standalone wrapper for YOLOv5 detection + a saved PyTorch classifier.
3
+ # Designed to be imported by app.py (Hugging Face / Gradio).
4
+
5
+ import os
6
+ from PIL import Image
7
+ import numpy as np
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from torchvision import transforms, models
11
+ import torch.nn as nn
12
+
13
# Load YOLOv5 model (uses torch.hub — Ultralytics repo).
# NOTE: this will download yolov5s.pt the first time (cached in environment).
def _load_yolo():
    """Fetch the pretrained YOLOv5s detector via torch.hub; None on failure."""
    try:
        return torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
    except Exception as e:
        # If users want to use ultralytics package or different method, handle gracefully.
        print("Warning: could not load yolov5 via torch.hub:", e)
        return None

yolo = _load_yolo()
22
+
23
# -- Classifier model (ResNet18, 196 classes) --
model = None
transform = None

def _load_classifier():
    """Build a 196-way ResNet18 and load weights from car_classifier.pth.

    Side effects on module globals:
      - ``model``: nn.Module in eval mode, or None if the checkpoint is
        missing or cannot be loaded.
      - ``transform``: 224x224 Resize + ToTensor pipeline (always set).
    """
    global model, transform
    # architecture: ResNet18 with the final FC swapped for 196 classes
    model = models.resnet18(pretrained=False)
    model.fc = nn.Linear(model.fc.in_features, 196)

    # Preprocessing is needed whether or not the checkpoint loads.
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])

    # Find the checkpoint: repo root, ./content, or /content (Colab).
    # FIX: the original only checked relative "content/", despite the
    # warning text promising "/content" — add the absolute Colab path.
    model_path = None
    for candidate in ("car_classifier.pth",
                      os.path.join("content", "car_classifier.pth"),
                      "/content/car_classifier.pth"):
        if os.path.exists(candidate):
            model_path = candidate
            break
    if model_path is None:
        # If missing, keep model=None; callers report classifier as disabled.
        print("Warning: car_classifier.pth not found at root or /content. Classifier disabled.")
        model = None
        return

    # NOTE(review): torch.load on an untrusted checkpoint can execute
    # arbitrary code; consider weights_only=True (torch>=1.13) — confirm
    # the checkpoint contains only tensors before enabling.
    ckpt = torch.load(model_path, map_location="cpu")
    # ckpt might be a full dict or a bare state_dict — handle both cases.
    if isinstance(ckpt, dict):
        # common key: "model_state_dict"; else bare state_dict (conv1.* keys)
        if "model_state_dict" in ckpt:
            state = ckpt["model_state_dict"]
        elif any(k.startswith('conv1') for k in ckpt.keys()):
            state = ckpt
        else:
            # unknown dict structure — try to find a nested state dict
            possible = None
            for v in ckpt.values():
                if isinstance(v, dict) and any(k.startswith('conv1') for k in v.keys()):
                    possible = v
                    break
            state = possible or ckpt
    else:
        # ckpt directly is probably a state_dict
        state = ckpt

    try:
        model.load_state_dict(state)
        model.eval()
        print("✅ Loaded classifier from", model_path)
    except Exception as e:
        print("Warning: failed to load state_dict cleanly:", e)
        model = None

# Try to load on import
_load_classifier()
79
+
80
+
81
# Simple color extractor (dominant-ish color)
def get_color_name(image_pil):
    """Bucket the average RGB of a downsampled image into a coarse color name.

    Returns "Unknown" if the input cannot be resized/averaged (e.g. wrong
    channel count or not an image at all).
    """
    try:
        pixels = np.array(image_pil.resize((50, 50))).reshape(-1, 3)
        r, g, b = pixels.mean(axis=0)
        # Ordered threshold rules — first match wins (order matters:
        # e.g. White is only reached after Red/Blue/Green fail).
        rules = (
            ("Red",    r > 150 and g < 100 and b < 100),
            ("Blue",   b > 150 and r < 100 and g < 100),
            ("Green",  g > 150 and r < 100 and b < 100),
            ("White",  r > 200 and g > 200 and b > 200),
            ("Black",  r < 50 and g < 50 and b < 50),
            ("Yellow", r > 200 and g > 200 and b < 100),
        )
        for name, matched in rules:
            if matched:
                return name
        return "Gray/Silver"
    except Exception:
        return "Unknown"
104
+
105
# The pipeline function expected by app.py
def detect_and_classify(img_path: str) -> list:
    """Detect cars in an image with YOLOv5 and classify each cropped car.

    Input: img_path (str) — path to an image file on disk.
    Output: list of tuples (PIL.Image crop, pred_class_idx (int or None),
        color_name (str), classifier_confidence (float or None)).
        pred_class_idx / classifier_confidence are None when the classifier
        is unavailable or fails on that crop.

    Raises:
        FileNotFoundError: img_path does not exist.
        RuntimeError: the YOLO model failed to load at import time.
    """
    if not os.path.exists(img_path):
        raise FileNotFoundError(f"Image not found: {img_path}")

    # If YOLO not available, return helpful error
    if yolo is None:
        raise RuntimeError("YOLO model not loaded (yolo is None). Check logs for earlier warning.")

    img = Image.open(img_path).convert("RGB")
    # Run YOLO detection
    results = yolo(img_path)  # Ultralytics API: passing path or PIL works

    # results.xyxy[0] is an Nx6 array: x1,y1,x2,y2,conf,cls
    try:
        dets = results.xyxy[0].cpu().numpy()
    except Exception:
        # fallback: some result objects expose detections via .pandas().xyxy[0]
        try:
            dets = results.pandas().xyxy[0].values
        except Exception:
            dets = []

    preds = []
    for det in dets:
        try:
            x1, y1, x2, y2, conf_det, cls = det
        except Exception:
            # if det is dict-like from pandas
            try:
                x1 = float(det[0]); y1 = float(det[1]); x2 = float(det[2]); y2 = float(det[3])
                conf_det = float(det[4]); cls = float(det[5])
            except Exception:
                continue

        if int(cls) != 2:  # COCO class 2 == car
            continue

        # crop with PIL (ensure integer coords and within bounds)
        # NOTE(review): only the lower bound is clamped; PIL.Image.crop
        # tolerates coordinates past the right/bottom edge, so this suffices.
        x1i, y1i, x2i, y2i = map(int, [max(0, x1), max(0, y1), max(0, x2), max(0, y2)])
        crop = img.crop((x1i, y1i, x2i, y2i))

        # classifier: softmax over the 196 classes; keep top-1 index + prob
        class_idx = None
        class_conf = None
        if model is not None:
            try:
                t = transform(crop).unsqueeze(0)  # batch 1
                with torch.no_grad():
                    out = model(t)
                probs = F.softmax(out, dim=1)
                class_conf = float(probs.max().item())
                class_idx = int(probs.argmax().item())
            except Exception as e:
                # if classifier fails, leave class_idx None
                class_idx = None
                class_conf = None

        # color: coarse average-RGB bucket for this crop
        color = get_color_name(crop)

        preds.append((crop, class_idx, color, class_conf))

    return preds