github-actions commited on
Commit
a2c259a
·
1 Parent(s): f36d845

Sync from GitHub to Hugging Face

Browse files
Files changed (18) hide show
  1. requirements.txt +2 -0
  2. space_repo/space_repo/app.py +25 -1
  3. space_repo/space_repo/space_repo/space_repo/app.py +14 -13
  4. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +5 -3
  5. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +5 -1
  6. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +2 -1
  7. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +14 -5
  8. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +14 -1
  9. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt +3 -0
  10. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +27 -113
  11. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +2 -2
  12. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/YOLO.ipynb +0 -0
  13. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/car_classifier.pth +3 -0
  14. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt +11 -1
  15. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/.gitattributes +35 -0
  16. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/README.md +91 -0
  17. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py +181 -0
  18. space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt +1 -0
requirements.txt CHANGED
@@ -12,3 +12,5 @@ datasets
12
 
13
  nbformat
14
  nbconvert
 
 
 
12
 
13
  nbformat
14
  nbconvert
15
+
16
+ ipython
space_repo/space_repo/app.py CHANGED
@@ -52,12 +52,36 @@ for cell in nb.cells:
52
  cell.source = "\n".join(lines)
53
 
54
 
55
- # --- Export cleaned notebook to Python ---
56
  py_exporter = PythonExporter()
57
  (code, _) = py_exporter.from_notebook_node(nb)
 
 
58
  with open("yolo_converted.py", "w") as f:
59
  f.write(code)
60
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  # --- Run the converted YOLO script ---
62
  mod = runpy.run_path("yolo_converted.py")
63
  detect_and_classify = mod.get("detect_and_classify")
 
52
  cell.source = "\n".join(lines)
53
 
54
 
55
+ # --- Export cleaned notebook to Python (via nbformat export) ---
56
  py_exporter = PythonExporter()
57
  (code, _) = py_exporter.from_notebook_node(nb)
58
+
59
+ # write initial converted file
60
  with open("yolo_converted.py", "w") as f:
61
  f.write(code)
62
 
63
+ # --- Post-process the generated file to fix indentation issues from removed lines ---
64
+ import re
65
+
66
+ with open("yolo_converted.py", "r") as f:
67
+ conv_code = f.read()
68
+
69
+ # 1) Replace any lines that are only indented 'pass # skipped during conversion'
70
+ # with an unindented version so they don't break top-level structure.
71
+ conv_code = re.sub(r'^[ \t]+pass # skipped during conversion\s*$', 'pass # skipped during conversion\n', conv_code, flags=re.M)
72
+
73
+ # 2) If any 'pass # skipped during conversion' directly follows a top-level statement
74
+ # with incorrect indentation, keep them as 'pass' but ensure indentation matches previous block.
75
+ # (This is conservative; we only normalize leading whitespace for the placeholder)
76
+ # Already handled by the regex above.
77
+
78
+ # 3) Remove any leading 'pass # skipped...' at the very top of the file (if present)
79
+ conv_code = re.sub(r'^\s*pass # skipped during conversion\s*', '', conv_code, count=1, flags=re.M)
80
+
81
+ # Save cleaned code back
82
+ with open("yolo_converted.py", "w") as f:
83
+ f.write(conv_code)
84
+
85
  # --- Run the converted YOLO script ---
86
  mod = runpy.run_path("yolo_converted.py")
87
  detect_and_classify = mod.get("detect_and_classify")
space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -29,28 +29,29 @@ with open("YOLO.ipynb") as f:
29
  nb = nbformat.read(f, as_version=4)
30
 
31
  # Remove or skip Google Colab imports and magic commands (! or %) or google colab file picker
32
- # --- Patch the YOLO notebook code to skip testing lines ---
33
  for cell in nb.cells:
34
  if cell.cell_type == "code":
35
  lines = []
36
  for line in cell.source.splitlines():
37
- if (
38
- line.strip().startswith("!") or
39
- line.strip().startswith("%") or
40
- "google.colab" in line or
41
- "files.upload" in line or
42
- "uploaded" in line or
43
- "img_path" in line or
44
- "detect_and_classify(" in line or # skip auto test calls
45
- "print(" in line or # skip print-only outputs
46
- "display(" in line # skip Jupyter displays
47
- ):
 
 
48
  continue
49
  lines.append(line)
50
  cell.source = "\n".join(lines)
51
 
52
 
53
-
54
  # --- Export cleaned notebook to Python ---
55
  py_exporter = PythonExporter()
56
  (code, _) = py_exporter.from_notebook_node(nb)
 
29
  nb = nbformat.read(f, as_version=4)
30
 
31
  # Remove or skip Google Colab imports and magic commands (! or %) or google colab file picker
32
+ # --- Patch the YOLO notebook code to skip testing lines safely ---
33
  for cell in nb.cells:
34
  if cell.cell_type == "code":
35
  lines = []
36
  for line in cell.source.splitlines():
37
+ bad_patterns = [
38
+ "!", "%",
39
+ "google.colab",
40
+ "files.upload",
41
+ "uploaded",
42
+ "img_path",
43
+ "detect_and_classify(",
44
+ "print(",
45
+ "display("
46
+ ]
47
+ if any(p in line for p in bad_patterns):
48
+ # Keep Python structure valid (avoid empty if-blocks)
49
+ lines.append(" pass # skipped during conversion")
50
  continue
51
  lines.append(line)
52
  cell.source = "\n".join(lines)
53
 
54
 
 
55
  # --- Export cleaned notebook to Python ---
56
  py_exporter = PythonExporter()
57
  (code, _) = py_exporter.from_notebook_node(nb)
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -34,14 +34,16 @@ for cell in nb.cells:
34
  if cell.cell_type == "code":
35
  lines = []
36
  for line in cell.source.splitlines():
37
- if (
38
  line.strip().startswith("!") or
39
  line.strip().startswith("%") or
40
  "google.colab" in line or
41
  "files.upload" in line or
42
  "uploaded" in line or
43
- "img_path" in line or # 👈 Added this
44
- "detect_and_classify(" in line # 👈 Skip test calls
 
 
45
  ):
46
  continue
47
  lines.append(line)
 
34
  if cell.cell_type == "code":
35
  lines = []
36
  for line in cell.source.splitlines():
37
+ if (
38
  line.strip().startswith("!") or
39
  line.strip().startswith("%") or
40
  "google.colab" in line or
41
  "files.upload" in line or
42
  "uploaded" in line or
43
+ "img_path" in line or
44
+ "detect_and_classify(" in line or # skip auto test calls
45
+ "print(" in line or # skip print-only outputs
46
+ "display(" in line # skip Jupyter displays
47
  ):
48
  continue
49
  lines.append(line)
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -29,6 +29,7 @@ with open("YOLO.ipynb") as f:
29
  nb = nbformat.read(f, as_version=4)
30
 
31
  # Remove or skip Google Colab imports and magic commands (! or %) or google colab file picker
 
32
  for cell in nb.cells:
33
  if cell.cell_type == "code":
34
  lines = []
@@ -38,13 +39,16 @@ for cell in nb.cells:
38
  line.strip().startswith("%") or
39
  "google.colab" in line or
40
  "files.upload" in line or
41
- "uploaded" in line # new line from google colab affecting huggingface setup
 
 
42
  ):
43
  continue
44
  lines.append(line)
45
  cell.source = "\n".join(lines)
46
 
47
 
 
48
  # --- Export cleaned notebook to Python ---
49
  py_exporter = PythonExporter()
50
  (code, _) = py_exporter.from_notebook_node(nb)
 
29
  nb = nbformat.read(f, as_version=4)
30
 
31
  # Remove or skip Google Colab imports and magic commands (! or %) or google colab file picker
32
+ # --- Patch the YOLO notebook code to skip testing lines ---
33
  for cell in nb.cells:
34
  if cell.cell_type == "code":
35
  lines = []
 
39
  line.strip().startswith("%") or
40
  "google.colab" in line or
41
  "files.upload" in line or
42
+ "uploaded" in line or
43
+ "img_path" in line or # 👈 Added this
44
+ "detect_and_classify(" in line # 👈 Skip test calls
45
  ):
46
  continue
47
  lines.append(line)
48
  cell.source = "\n".join(lines)
49
 
50
 
51
+
52
  # --- Export cleaned notebook to Python ---
53
  py_exporter = PythonExporter()
54
  (code, _) = py_exporter.from_notebook_node(nb)
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -37,7 +37,8 @@ for cell in nb.cells:
37
  line.strip().startswith("!") or
38
  line.strip().startswith("%") or
39
  "google.colab" in line or
40
- "files.upload" in line
 
41
  ):
42
  continue
43
  lines.append(line)
 
37
  line.strip().startswith("!") or
38
  line.strip().startswith("%") or
39
  "google.colab" in line or
40
+ "files.upload" in line or
41
+ "uploaded" in line # new line from google colab affecting huggingface setup
42
  ):
43
  continue
44
  lines.append(line)
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -8,6 +8,9 @@ Original file is located at
8
  """
9
 
10
  import os
 
 
 
11
  import json
12
  import torch
13
  import gradio as gr
@@ -25,15 +28,21 @@ if not os.path.exists("YOLO.ipynb"):
25
  with open("YOLO.ipynb") as f:
26
  nb = nbformat.read(f, as_version=4)
27
 
28
- # --- Clean notebook magic commands (!pip, !git, %cd, etc.) ---
29
  for cell in nb.cells:
30
  if cell.cell_type == "code":
31
- cleaned_lines = []
32
  for line in cell.source.splitlines():
33
- if line.strip().startswith(("!", "%")):
 
 
 
 
 
34
  continue
35
- cleaned_lines.append(line)
36
- cell.source = "\n".join(cleaned_lines)
 
37
 
38
  # --- Export cleaned notebook to Python ---
39
  py_exporter = PythonExporter()
 
8
  """
9
 
10
  import os
11
+ os.system("pip install seaborn --quiet")
12
+
13
+
14
  import json
15
  import torch
16
  import gradio as gr
 
28
  with open("YOLO.ipynb") as f:
29
  nb = nbformat.read(f, as_version=4)
30
 
31
+ # Remove or skip Google Colab imports and magic commands (! or %) or google colab file picker
32
  for cell in nb.cells:
33
  if cell.cell_type == "code":
34
+ lines = []
35
  for line in cell.source.splitlines():
36
+ if (
37
+ line.strip().startswith("!") or
38
+ line.strip().startswith("%") or
39
+ "google.colab" in line or
40
+ "files.upload" in line
41
+ ):
42
  continue
43
+ lines.append(line)
44
+ cell.source = "\n".join(lines)
45
+
46
 
47
  # --- Export cleaned notebook to Python ---
48
  py_exporter = PythonExporter()
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -21,14 +21,27 @@ from datasets import load_dataset
21
  if not os.path.exists("YOLO.ipynb"):
22
  raise FileNotFoundError("YOLO.ipynb not found in app directory!")
23
 
24
- # Convert YOLO.ipynb → yolo_converted.py
25
  with open("YOLO.ipynb") as f:
26
  nb = nbformat.read(f, as_version=4)
 
 
 
 
 
 
 
 
 
 
 
 
27
  py_exporter = PythonExporter()
28
  (code, _) = py_exporter.from_notebook_node(nb)
29
  with open("yolo_converted.py", "w") as f:
30
  f.write(code)
31
 
 
32
  mod = runpy.run_path("yolo_converted.py")
33
  detect_and_classify = mod.get("detect_and_classify")
34
  if not detect_and_classify:
 
21
  if not os.path.exists("YOLO.ipynb"):
22
  raise FileNotFoundError("YOLO.ipynb not found in app directory!")
23
 
24
+ # Read YOLO.ipynb
25
  with open("YOLO.ipynb") as f:
26
  nb = nbformat.read(f, as_version=4)
27
+
28
+ # --- Clean notebook magic commands (!pip, !git, %cd, etc.) ---
29
+ for cell in nb.cells:
30
+ if cell.cell_type == "code":
31
+ cleaned_lines = []
32
+ for line in cell.source.splitlines():
33
+ if line.strip().startswith(("!", "%")):
34
+ continue
35
+ cleaned_lines.append(line)
36
+ cell.source = "\n".join(cleaned_lines)
37
+
38
+ # --- Export cleaned notebook to Python ---
39
  py_exporter = PythonExporter()
40
  (code, _) = py_exporter.from_notebook_node(nb)
41
  with open("yolo_converted.py", "w") as f:
42
  f.write(code)
43
 
44
+ # --- Run the converted YOLO script ---
45
  mod = runpy.run_path("yolo_converted.py")
46
  detect_and_classify = mod.get("detect_and_classify")
47
  if not detect_and_classify:
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt CHANGED
@@ -9,3 +9,6 @@ opencv-python
9
  timm
10
  transformers
11
  datasets
 
 
 
 
9
  timm
10
  transformers
11
  datasets
12
+
13
+ nbformat
14
+ nbconvert
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -7,146 +7,75 @@ Original file is located at
7
  https://colab.research.google.com/drive/1gTrf304mzjGMheD47oHDhnYTIrEyf4qp
8
  """
9
 
 
 
 
10
  import gradio as gr
11
  from PIL import Image
12
- import torch
13
- import os
14
-
15
- import os
16
- from google.colab import files
17
-
18
- if not os.path.exists('YOLO.ipynb'):
19
- print("Please upload YOLO.ipynb (the script exported from your YOLO notebook).")
20
- uploaded = files.upload() # upload YOLO.ipynb
21
- print("Uploaded:", list(uploaded.keys()))
22
- else:
23
- print("YOLO.ipynb already present.")
24
-
25
- !ls /content
26
-
27
  import nbformat
28
  from nbconvert import PythonExporter
29
  import runpy
 
30
 
31
- # Convert YOLO.ipynb to a .py script dynamically
 
 
 
 
32
  with open("YOLO.ipynb") as f:
33
  nb = nbformat.read(f, as_version=4)
34
-
35
- # Find the cell that loads car_classifier.pth and modify the path
36
- # Also, remove any code that tries to open the notebook file as an image
37
- modified_cells = []
38
- for cell in nb.cells:
39
- if cell.cell_type == 'code':
40
- # This is a heuristic: look for lines containing 'car_classifier.pth'
41
- if 'car_classifier.pth' in cell.source:
42
- cell.source = cell.source.replace("'car_classifier.pth'", "'/content/car_classifier.pth'")
43
- cell.source = cell.source.replace('"car_classifier.pth"', '"/content/car_classifier.pth"')
44
-
45
- # Heuristic to remove code that might try to open the notebook as an image
46
- if 'Image.open(' in cell.source and 'YOLO.ipynb' in cell.source:
47
- cell.source = '# Removed potential image loading of notebook file:\n#' + cell.source
48
-
49
  py_exporter = PythonExporter()
50
  (code, _) = py_exporter.from_notebook_node(nb)
51
-
52
- # Save temporarily as script
53
  with open("yolo_converted.py", "w") as f:
54
  f.write(code)
55
 
56
- # Now safely import detect_and_classify() from that converted script
57
  mod = runpy.run_path("yolo_converted.py")
58
  detect_and_classify = mod.get("detect_and_classify")
59
-
60
  if not detect_and_classify:
61
- raise RuntimeError("Function detect_and_classify not found in YOLO.ipynb")
62
 
63
- print("✅ YOLO function imported successfully")
64
 
65
- import torch, os, json
66
- pth = "/content/car_classifier.pth"
67
- print("Exists:", os.path.exists(pth))
68
- ckpt = torch.load(pth, map_location="cpu")
69
- print("Type:", type(ckpt))
70
-
71
- if isinstance(ckpt, dict):
72
- keys = list(ckpt.keys())
73
- print("Checkpoint keys (first 20):", keys[:20])
74
- # If it's a pure state_dict, it will look like parameter names (e.g. 'conv1.weight')
75
- # If it's a wrapped checkpoint, it may contain 'model_state_dict' or 'class_names'
76
- else:
77
- print("Checkpoint is not a dict; it's probably a raw model object.")
78
-
79
- !pip install -q datasets
80
-
81
- from datasets import load_dataset
82
- ds = load_dataset("tanganke/stanford_cars")
83
- # HF dataset provides label names in the train feature
84
- class_names = ds["train"].features["label"].names
85
- print("Loaded", len(class_names), "class names. Sample:", class_names[:10])
86
-
87
- # Save to disk for reuse
88
- import json
89
- with open("class_names.json", "w") as f:
90
- json.dump(class_names, f, indent=2)
91
- print("Saved class_names.json")
92
-
93
- import json, os
94
- if os.path.exists("class_names.json"):
95
- with open("class_names.json") as f:
96
- class_names = json.load(f)
97
- print("Loaded class_names from file, len =", len(class_names))
98
- else:
99
- print("class_names.json not found; run the HF cell above.")
100
-
101
- import gradio as gr
102
- import os
103
-
104
- # ensure class_names exists in the notebook (from previous cell)
105
  try:
106
- assert class_names is not None and len(class_names) > 0
107
- print("Using class_names with", len(class_names), "entries")
108
- except Exception:
 
 
 
109
  class_names = None
110
- print("class_names not available; will show numeric labels")
111
 
112
- def gradio_interface(image, *args, **kwargs):
 
113
  if image is None:
114
  return "Please upload an image."
115
-
116
  temp_path = "temp_image.png"
117
  image.save(temp_path)
118
 
119
  try:
120
- results = detect_and_classify(temp_path) # your notebook function
121
  except Exception as e:
122
  return f"❌ Error running YOLO pipeline: {e}"
123
  finally:
124
- if os.path.exists(temp_path):
125
- os.remove(temp_path)
126
 
127
  if not results:
128
  return "No cars detected."
129
 
130
  lines = [f"Cars detected: {len(results)}"]
131
-
132
  for i, item in enumerate(results, start=1):
133
- # handle both 3-tuple and 4-tuple safely
134
  if len(item) == 4:
135
  crop, pred, color, conf = item
136
  else:
137
  crop, pred, color = item
138
  conf = None
139
 
140
- # map pred -> human name if possible
141
- if isinstance(pred, int):
142
- if class_names and 0 <= pred < len(class_names):
143
- name = class_names[pred]
144
- else:
145
- name = f"Class {pred}"
146
  else:
147
  name = str(pred)
148
 
149
- # Format with confidence if available
150
  if conf is not None:
151
  lines.append(f"Car {i}: {color} {name} ({conf*100:.1f}% confident)")
152
  else:
@@ -154,28 +83,13 @@ def gradio_interface(image, *args, **kwargs):
154
 
155
  return "\n".join(lines)
156
 
157
- # Launch Gradio Interface
158
  iface = gr.Interface(
159
  fn=gradio_interface,
160
  inputs=gr.Image(type="pil", label="Upload an Image"),
161
  outputs=gr.Textbox(label="Detection & Classification Results"),
162
  title="Car Detector + Classifier (YOLO)",
163
- description="Upload a car image and get its color, model, and confidence score.",
164
  )
165
- iface.launch(share=True)
166
-
167
- # Test the gradio_interface function with the venza.jpg image
168
- image_path = "/content/venza.jpg"
169
- try:
170
- # Open the image file
171
- image = Image.open(image_path)
172
- # Call the gradio_interface function, passing class_names
173
- test_output = gradio_interface(image, class_names)
174
- # Print the output
175
- print(test_output)
176
- except FileNotFoundError:
177
- print(f"Error: Image file not found at {image_path}")
178
- except Exception as e:
179
- print(f"An error occurred: {e}")
180
 
181
- !grep -n "results" YOLO.ipynb
 
 
7
  https://colab.research.google.com/drive/1gTrf304mzjGMheD47oHDhnYTIrEyf4qp
8
  """
9
 
10
+ import os
11
+ import json
12
+ import torch
13
  import gradio as gr
14
  from PIL import Image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  import nbformat
16
  from nbconvert import PythonExporter
17
  import runpy
18
+ from datasets import load_dataset
19
 
20
+ # --- Convert YOLO notebook to Python ---
21
+ if not os.path.exists("YOLO.ipynb"):
22
+ raise FileNotFoundError("YOLO.ipynb not found in app directory!")
23
+
24
+ # Convert YOLO.ipynb → yolo_converted.py
25
  with open("YOLO.ipynb") as f:
26
  nb = nbformat.read(f, as_version=4)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  py_exporter = PythonExporter()
28
  (code, _) = py_exporter.from_notebook_node(nb)
 
 
29
  with open("yolo_converted.py", "w") as f:
30
  f.write(code)
31
 
 
32
  mod = runpy.run_path("yolo_converted.py")
33
  detect_and_classify = mod.get("detect_and_classify")
 
34
  if not detect_and_classify:
35
+ raise RuntimeError("detect_and_classify() not found in YOLO.ipynb")
36
 
37
+ print("✅ YOLO pipeline loaded successfully")
38
 
39
+ # --- Load class names ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  try:
41
+ ds = load_dataset("tanganke/stanford_cars")
42
+ class_names = ds["train"].features["label"].names
43
+ with open("class_names.json", "w") as f:
44
+ json.dump(class_names, f)
45
+ except Exception as e:
46
+ print("Warning: Could not load dataset class names.", e)
47
  class_names = None
 
48
 
49
+ # --- Gradio UI ---
50
+ def gradio_interface(image):
51
  if image is None:
52
  return "Please upload an image."
 
53
  temp_path = "temp_image.png"
54
  image.save(temp_path)
55
 
56
  try:
57
+ results = detect_and_classify(temp_path)
58
  except Exception as e:
59
  return f"❌ Error running YOLO pipeline: {e}"
60
  finally:
61
+ os.remove(temp_path)
 
62
 
63
  if not results:
64
  return "No cars detected."
65
 
66
  lines = [f"Cars detected: {len(results)}"]
 
67
  for i, item in enumerate(results, start=1):
 
68
  if len(item) == 4:
69
  crop, pred, color, conf = item
70
  else:
71
  crop, pred, color = item
72
  conf = None
73
 
74
+ if isinstance(pred, int) and class_names and 0 <= pred < len(class_names):
75
+ name = class_names[pred]
 
 
 
 
76
  else:
77
  name = str(pred)
78
 
 
79
  if conf is not None:
80
  lines.append(f"Car {i}: {color} {name} ({conf*100:.1f}% confident)")
81
  else:
 
83
 
84
  return "\n".join(lines)
85
 
 
86
  iface = gr.Interface(
87
  fn=gradio_interface,
88
  inputs=gr.Image(type="pil", label="Upload an Image"),
89
  outputs=gr.Textbox(label="Detection & Classification Results"),
90
  title="Car Detector + Classifier (YOLO)",
91
+ description="Upload a car image and get its color, model, and confidence score."
92
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
 
94
+ if __name__ == "__main__":
95
+ iface.launch()
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py CHANGED
@@ -7,9 +7,9 @@ Original file is located at
7
  https://colab.research.google.com/drive/1gTrf304mzjGMheD47oHDhnYTIrEyf4qp
8
  """
9
 
10
- !pip install gradio --quiet
11
  import gradio as gr
12
  from PIL import Image
 
13
  import os
14
 
15
  import os
@@ -178,4 +178,4 @@ except FileNotFoundError:
178
  except Exception as e:
179
  print(f"An error occurred: {e}")
180
 
181
- !grep -n "results" YOLO.ipynb
 
7
  https://colab.research.google.com/drive/1gTrf304mzjGMheD47oHDhnYTIrEyf4qp
8
  """
9
 
 
10
  import gradio as gr
11
  from PIL import Image
12
+ import torch
13
  import os
14
 
15
  import os
 
178
  except Exception as e:
179
  print(f"An error occurred: {e}")
180
 
181
+ !grep -n "results" YOLO.ipynb
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/YOLO.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/car_classifier.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df2189a3b9547272dd7a962f5d05e15a0155c57f7b1e6fee41fb4e698d32666e
3
+ size 45188363
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt CHANGED
@@ -1 +1,11 @@
1
- trackio<1.0
 
 
 
 
 
 
 
 
 
 
 
1
+ torch
2
+ torchvision
3
+ ultralytics
4
+ gradio
5
+ pillow
6
+ numpy
7
+ matplotlib
8
+ opencv-python
9
+ timm
10
+ transformers
11
+ datasets
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/README.md ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Car Classifier Model
3
+ emoji: 🚗
4
+ colorFrom: blue
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: "4.0.0"
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ # AiModelCarClassifier
13
+ Creating and Running a Car Classifier Model...
14
+
15
+ ## Car Detector(YOLO + Custom Model)
16
+
17
+ This project uses **YOLOv5** for car detection and a **custom-trained classifier** for car model recognition and color identification.
18
+ It takes in any image (JPEG/PNG), detects cars, classifies the car make & model, and outputs color and confidence scores.
19
+
20
+ Example output:
21
+ - **Cars detected: 1**
22
+ - **Car 1: Gray/Silver Dodge Dakota Crew Cab 2010 (98.7% confident)**
23
+
24
+ ---
25
+
26
+ ## Overview
27
+
28
+ This project combines **object detection** and **image classification** in one simple pipeline:
29
+
30
+ 1. **YOLOv5** detects cars in the image.
31
+ 2. The detected car regions are cropped and passed into a **PyTorch classifier** (`car_classifier.pth`).
32
+ 3. A small color recognition helper determines the car’s dominant color.
33
+ 4. Results are displayed through a simple **Gradio UI** (or any frontend, e.g. HTML + Flask).
34
+
35
+ ---
36
+
37
+ ## Project Structure
38
+
39
+
40
+ ├── YOLO.ipynb # Main notebook for YOLO + classification logic
41
+ ├── car_classifier.pth # Trained PyTorch model for car model recognition
42
+ ├── app.py # Gradio (or Flask) app for running the interface
43
+ ├── class_names.json # (Optional) Human-readable class labels
44
+ ├── requirements.txt # Python dependencies
45
+ └── README.md # Project description
46
+
47
+ ---
48
+
49
+ ---
50
+
51
+ ## Works steps
52
+
53
+ 1. **Image Upload** → User uploads an image.
54
+ 2. **YOLOv5 Detection** → Detects car bounding boxes.
55
+ 3. **Classification** → Each car crop is classified using `car_classifier.pth`.
56
+ 4. **Color Recognition** → Extracts car color from the cropped region.
57
+ 5. **Output** → Displays model name, color, and confidence percentage.
58
+
59
+ ---
60
+ ## Model Details
61
+
62
+ - **YOLOv5**: Handles object detection (pretrained on COCO dataset).
63
+ - **Car Classifier (`car_classifier.pth`)**: Fine-tuned model trained on [Stanford Cars Dataset](https://www.kaggle.com/datasets/jessicali9530/stanford-cars).
64
+ - **Color Extractor**: Uses average RGB values to estimate color.
65
+
66
+ ---
67
+ ## install depencies
68
+ ```
69
+ pip install -r requirements.txt
70
+ ```
71
+
72
+
73
+ Then open the Gradio or local web interface that appears in your console.
74
+ ---
75
+
76
+ ## Setup & Run
77
+
78
+ Clone the repo:
79
+
80
+ Then open the Gradio or local web interface that appears in your console.
81
+ ```bash
82
+ https://github.com/<Your-Username>/AiModelCarClassifier.git
83
+ cd car-detector-classifier
84
+ ```
85
+
86
+
87
+
88
+ ## run the app
89
+ ```
90
+ python app.py
91
+ ```
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/app.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
"""GradioUI.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1gTrf304mzjGMheD47oHDhnYTIrEyf4qp
"""

# Colab/IPython shell magic — installs the UI dependency at runtime.
!pip install gradio --quiet
import gradio as gr
from PIL import Image
import os

# NOTE(review): duplicate `import os` (already imported above) — harmless but redundant.
import os
from google.colab import files

# Ensure the exported YOLO notebook is present; if missing, prompt for a
# manual upload through the Colab file picker.
if not os.path.exists('YOLO.ipynb'):
    print("Please upload YOLO.ipynb (the script exported from your YOLO notebook).")
    uploaded = files.upload()  # upload YOLO.ipynb
    print("Uploaded:", list(uploaded.keys()))
else:
    print("YOLO.ipynb already present.")

# Shell magic: list the working directory to confirm the file landed.
!ls /content
26
+
27
import nbformat
from nbconvert import PythonExporter
import runpy

# Convert YOLO.ipynb into an importable .py script at runtime so the
# detection/classification function can be reused from this app.
with open("YOLO.ipynb") as f:
    nb = nbformat.read(f, as_version=4)

# Patch notebook cells in place before export:
#  * point any 'car_classifier.pth' reference at the Colab-local copy
#  * comment out code that mistakenly opens the notebook file as an image
# (Fix: the original built an unused `modified_cells` list here; the loop
# mutates `nb.cells` in place, so the accumulator was dead code and is removed.)
for cell in nb.cells:
    if cell.cell_type == 'code':
        # Heuristic: look for lines containing 'car_classifier.pth'
        if 'car_classifier.pth' in cell.source:
            cell.source = cell.source.replace("'car_classifier.pth'", "'/content/car_classifier.pth'")
            cell.source = cell.source.replace('"car_classifier.pth"', '"/content/car_classifier.pth"')

        # Heuristic to remove code that might try to open the notebook as an image
        if 'Image.open(' in cell.source and 'YOLO.ipynb' in cell.source:
            cell.source = '# Removed potential image loading of notebook file:\n#' + cell.source

py_exporter = PythonExporter()
(code, _) = py_exporter.from_notebook_node(nb)

# Save temporarily as a script so runpy can execute it.
with open("yolo_converted.py", "w") as f:
    f.write(code)

# Execute the converted script and pull the detection entry point out of
# its resulting module namespace.
mod = runpy.run_path("yolo_converted.py")
detect_and_classify = mod.get("detect_and_classify")

if not detect_and_classify:
    raise RuntimeError("Function detect_and_classify not found in YOLO.ipynb")

print("✅ YOLO function imported successfully")
64
+
65
# Inspect the trained classifier checkpoint to learn its on-disk format
# (pure state_dict vs. wrapped checkpoint vs. pickled model object).
import torch, os, json

checkpoint_path = "/content/car_classifier.pth"
print("Exists:", os.path.exists(checkpoint_path))

checkpoint = torch.load(checkpoint_path, map_location="cpu")
print("Type:", type(checkpoint))

if not isinstance(checkpoint, dict):
    print("Checkpoint is not a dict; it's probably a raw model object.")
else:
    # A pure state_dict lists parameter names (e.g. 'conv1.weight');
    # a wrapped checkpoint may contain 'model_state_dict' or 'class_names'.
    print("Checkpoint keys (first 20):", list(checkpoint.keys())[:20])
78
+
79
+ !pip install -q datasets
80
+
81
+ from datasets import load_dataset
82
+ ds = load_dataset("tanganke/stanford_cars")
83
+ # HF dataset provides label names in the train feature
84
+ class_names = ds["train"].features["label"].names
85
+ print("Loaded", len(class_names), "class names. Sample:", class_names[:10])
86
+
87
+ # Save to disk for reuse
88
+ import json
89
+ with open("class_names.json", "w") as f:
90
+ json.dump(class_names, f, indent=2)
91
+ print("Saved class_names.json")
92
+
93
# Reload the cached label vocabulary, if a previous run saved it.
import json, os

labels_file = "class_names.json"
if not os.path.exists(labels_file):
    print("class_names.json not found; run the HF cell above.")
else:
    with open(labels_file) as fh:
        class_names = json.load(fh)
    print("Loaded class_names from file, len =", len(class_names))
100
+
101
import gradio as gr
import os

# Verify that class_names (built in an earlier cell) is usable; otherwise
# fall back to numeric class indices in the UI output.
# NOTE: the broad `except Exception` is deliberate — it also catches the
# NameError raised when class_names was never defined at all.
try:
    # Fix: explicit raise instead of `assert`, which is stripped under -O
    # and must not carry runtime control flow.
    if class_names is None or len(class_names) == 0:
        raise ValueError("class_names is empty")
    print("Using class_names with", len(class_names), "entries")
except Exception:
    class_names = None
    print("class_names not available; will show numeric labels")
111
+
112
def gradio_interface(image, *args, **kwargs):
    """Run the YOLO detect+classify pipeline on an uploaded PIL image.

    The image is written to a temporary file (the pipeline takes a path),
    and each detection is formatted as "Car N: <color> <model> (...)".
    Extra positional/keyword args are accepted and ignored.
    """
    if image is None:
        return "Please upload an image."

    temp_path = "temp_image.png"
    image.save(temp_path)

    try:
        results = detect_and_classify(temp_path)  # notebook-exported pipeline
    except Exception as exc:
        return f"❌ Error running YOLO pipeline: {exc}"
    finally:
        # Always clean up the temp file, even on pipeline failure.
        if os.path.exists(temp_path):
            os.remove(temp_path)

    if not results:
        return "No cars detected."

    report = [f"Cars detected: {len(results)}"]

    for idx, detection in enumerate(results, start=1):
        # The pipeline may yield (crop, pred, color) or (crop, pred, color, conf).
        conf = None
        if len(detection) == 4:
            _crop, pred, color, conf = detection
        else:
            _crop, pred, color = detection

        # Map an integer prediction to a human-readable name when possible.
        if isinstance(pred, int):
            in_range = class_names and 0 <= pred < len(class_names)
            name = class_names[pred] if in_range else f"Class {pred}"
        else:
            name = str(pred)

        suffix = "" if conf is None else f" ({conf*100:.1f}% confident)"
        report.append(f"Car {idx}: {color} {name}{suffix}")

    return "\n".join(report)
156
+
157
# Launch Gradio Interface
# Wire the pipeline into a simple upload-image -> text-report UI.
iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Image(type="pil", label="Upload an Image"),
    outputs=gr.Textbox(label="Detection & Classification Results"),
    title="Car Detector + Classifier (YOLO)",
    description="Upload a car image and get its color, model, and confidence score.",
)
# share=True exposes a temporary public gradio.live URL (needed from Colab).
iface.launch(share=True)

# Test the gradio_interface function with the venza.jpg image
image_path = "/content/venza.jpg"
try:
    # Open the image file
    image = Image.open(image_path)
    # Call the gradio_interface function, passing class_names
    # NOTE(review): the class_names argument is absorbed by *args and
    # ignored inside gradio_interface, which reads the module-level
    # class_names instead — confirm this is intended.
    test_output = gradio_interface(image, class_names)
    # Print the output
    print(test_output)
except FileNotFoundError:
    print(f"Error: Image file not found at {image_path}")
except Exception as e:
    print(f"An error occurred: {e}")

# Shell magic: locate where "results" is produced in the source notebook.
!grep -n "results" YOLO.ipynb
space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/space_repo/requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ trackio<1.0