Bappadala Rohith Kumar Naidu committed
Commit
ae454c7
·
1 Parent(s): 81e2e9c

chore: remove helper scripts

notebooks/cell_dump.txt DELETED
@@ -1,40 +0,0 @@
- === Accident_EDA_&_Hotspot_Generator_chatbot_service_data_accidents_3.ipynb ===
- Code Cell 0: # Cell 0 — Upload Dataset | from google.colab import files | print("▶ UPLOAD your accidents CSV dataset NOW:")
- Code Cell 1: # Cell 1 — Read baseline datasets | import pandas as pd, json | df = pd.read_csv(filename)
- Code Cell 2: # Cell 2 — Generate Summary JSON | state_col = next((c for c in df.columns if 'state' in c), None) | acc_col = next((c for c in df.columns if 'accident' in c), df.columns[2])
- Code Cell 3: # Cell 3 — Process raw GPS tags into hotspot clusters | lat_col = next((c for c in df.columns if 'lat' in c), None) | lon_col = next((c for c in df.columns if 'lon' in c or 'lng' in c), None)
-
- === ChromaDB_RAG_Vectorstore_Build_chatbot_service_data_chroma_db_2.ipynb ===
- Code Cell 0: import time | from IPython.display import Javascript | display(Javascript('''
- Code Cell 1: # Cell 1 — Install RAG tools | !pip install chromadb==0.5.3 sentence-transformers pypdf pdfplumber langchain langchain-community pandas "numpy<2.0.0" -q | print("✅ Installations complete")
- Code Cell 2: # Cell 2 — Initialize Embedder Models | # We use TWO embedders: English for legal text, Multilingual for standard files | from sentence_transformers import SentenceTransformer
- Code Cell 3: # Cell 3 — Initialize Chroma Collections | import chromadb | client = chromadb.PersistentClient(path='/content/chroma_db')
- Code Cell 4: # Cell 3.5 — Create Folders and Upload Files | import os, shutil | from google.colab import files
- Code Cell 5: # Cell 4 — Helper functions for Document Chunking | from langchain_community.document_loaders import PyPDFLoader | from langchain_text_splitters import RecursiveCharacterTextSplitter
- Code Cell 6: # Cell 5 — Parse & Embed strictly Legal PDFs | for pdf in glob.glob('/content/rag_data/legal/*.pdf'): | add_pdf(pdf, legal_col, en_embedder, os.path.basename(pdf))
- Code Cell 7: # Cell 6 — Parse & Embed Multilingual CSV/Rules | if os.path.exists('/content/rag_data/violations_seed.csv'): | print("Violations file found, embedding into general_col...")
- Code Cell 8: # Cell 7 — Download Database zip | import shutil | from google.colab import files
- Code Cell 9: print("🔍 Testing Vector Database Retrieval...") | # Test 1: Ask the violations (general_col) about helmets | print("\n--- Testing Violations Database ---")
-
- === Risk_Model_ONNX_Training_frontend_public_models_5.ipynb ===
- Code Cell 0: # Cell 1 — Install ML Toolkit | !pip install scikit-learn skl2onnx pandas numpy -q | print("✅ Toolkit installed")
- Code Cell 1: # Cell 2 — Build synthetic data matching sensor ingestion structure | import pandas as pd, numpy as np | # Feature: NH=0, SH=1, MDR=2, VR=3
- Code Cell 2: # Cell 3 — Train the GradientBoosting Classifier | from sklearn.ensemble import GradientBoostingClassifier | model = GradientBoostingClassifier(n_estimators=50, max_depth=4)
- Code Cell 3: # Cell 4 — Package as ONNX and Export | from skl2onnx import convert_sklearn | from skl2onnx.common.data_types import FloatTensorType
-
- === Roads_Data_Processing_backend_data_4.ipynb ===
- Code Cell 0: # Cell 1 — Toll Plazas Lite | import pandas as pd, json | from google.colab import files
-
- === YOLOv8_Pothole_Detector_Training_frontend_public_models_1.ipynb ===
- Code Cell 0: import time | from IPython.display import Javascript | display(Javascript('''
- Code Cell 1: !pip install ultralytics roboflow -q | !pip install onnx onnxruntime -q
- Code Cell 2: import os | from ultralytics import YOLO | print("✅ Ultralytics ready, CUDA:", os.popen("nvidia-smi --query-gpu=name --format=csv,noheader").read().strip())
- Code Cell 3: from google.colab import files | uploaded = files.upload()
- Code Cell 4: # Cell 3 — Extract the main zipped dataset | import zipfile | with zipfile.ZipFile('/content/archive.zip', 'r') as z:
- Code Cell 5: # Cell 4 — Setup master directory structure for merged datasets | import os, shutil | from pathlib import Path
- Code Cell 6: # Cell 5 — Copy all images and labels into the merged folder (Bulletproof Search) | # List of directories containing datasets (Assuming they are available in /content/) | DATASETS = [
- Code Cell 7: # Cell 6 — Create data.yaml mapping to our 3 classes | data_yaml = """ | train: /content/merged/train/images
- Code Cell 8: # Cell 7 — Train (45-60 min on T4 GPU) | model = YOLO('yolov8n.pt') | results = model.train(
- Code Cell 9: # Cell 8 — Export ONNX | best_model = YOLO('/content/runs/detect/pothole_v1/weights/best.pt') | best_model.export(format='onnx', imgsz=640, opset=12, simplify=True)
- Code Cell 10: # Cell 9 — Download the weights | import shutil | from google.colab import files
-
notebooks/check_cells.py DELETED
@@ -1,17 +0,0 @@
- import json, glob
-
- with open('cell_dump.txt', 'w', encoding='utf-8') as out:
-     for nb_path in glob.glob('*.ipynb'):
-         out.write(f'=== {nb_path} ===\n')
-         with open(nb_path, 'r', encoding='utf-8') as f:
-             nb = json.load(f)
-         c_idx = 0
-         for cell in nb['cells']:
-             if cell['cell_type'] == 'code':
-                 lines = cell['source']
-                 # Get first 3 nonempty lines
-                 non_empty = [l.strip() for l in lines if l.strip()]
-                 prefix = ' | '.join(non_empty[:3])
-                 out.write(f'Code Cell {c_idx}: {prefix}\n')
-                 c_idx += 1
-         out.write('\n')
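
Note that the deleted `check_cells.py` iterates `cell['source']` directly, which assumes each cell's source is stored as a list of strings; `.ipynb` files may also store it as a single string, in which case the loop would iterate characters. A minimal sketch of the same dump logic using `nbformat`, which normalizes multiline sources to one string, might look like this (a hypothetical variant, not part of the removed file):

```python
# Hypothetical nbformat-based variant of the deleted check_cells.py.
# nbformat.read() rejoins list-of-strings sources into a single string,
# so str.splitlines() works regardless of how the JSON stored them.
import glob
import nbformat  # assumes: pip install nbformat

with open("cell_dump.txt", "w", encoding="utf-8") as out:
    for nb_path in glob.glob("*.ipynb"):
        out.write(f"=== {nb_path} ===\n")
        nb = nbformat.read(nb_path, as_version=4)
        code_cells = [c for c in nb.cells if c.cell_type == "code"]
        for c_idx, cell in enumerate(code_cells):
            # First three non-empty lines, joined with " | " as in the dump above
            non_empty = [l.strip() for l in cell.source.splitlines() if l.strip()]
            out.write(f"Code Cell {c_idx}: {' | '.join(non_empty[:3])}\n")
        out.write("\n")
```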
notebooks/smart_inject.py DELETED
@@ -1,132 +0,0 @@
- import json
-
- def md(lines):
-     return {"cell_type": "markdown", "metadata": {}, "source": lines}
-
- # Markdown cell definitions
- nb1_mds = [
-     md(["# 🚗 YOLOv8 Pothole & Road Damage Detector — Training Pipeline\n", "\n", "**Part of:** SafeVisionAI · IIT Madras Road Safety Hackathon 2026\n", "**Output:** `pothole_v1/weights/best.onnx` → deployed to `frontend/public/models/`\n", "\n", "This notebook trains a YOLOv8n object detection model to identify **potholes, cracks, and manholes** on Indian roads.\n", "The trained model is exported to ONNX format for in-browser inference via `onnxruntime-web`.\n", "\n", "---\n", "### 📋 Pipeline Overview\n", "| Step | What happens |\n", "|------|-------------|\n", "| 1 | Install Ultralytics + ONNX and verify GPU |\n", "| 2 | Upload `archive.zip` dataset (road_damage_2025) |\n", "| 3 | Extract the zip into `/content/pothole_data/` |\n", "| 4 | Create master merged directory structure |\n", "| 5 | Merge all dataset images + labels into one folder |\n", "| 6 | Write `data.yaml` for 3-class detection |\n", "| 7 | Train YOLOv8n for 50 epochs on T4 GPU (~45 min) |\n", "| 8 | Export best weights to ONNX |\n", "\n", "> ⚠️ **Requires GPU runtime:** Runtime → Change runtime type → T4 GPU"]),
-     md(["## 🔧 Step 1 — Environment Setup\n", "\n", "Keeps the Colab session alive during long training runs and installs all required libraries.\n", "- `ultralytics` — YOLOv8 training framework by Ultralytics\n", "- `roboflow` — dataset management (optional augmentation)\n", "- `onnx` + `onnxruntime` — ONNX export and validation"]),
-     md(["## ✅ Step 2 — Verify GPU & Import YOLO\n", "\n", "Confirms that the Tesla T4 GPU is available and the Ultralytics framework is ready."]),
-     md(["## 📁 Step 3 — Upload Dataset\n", "\n", "Upload the `archive.zip` file from the Hub:\n", "```\n", "chatbot_service/data/pothole_training/road_damage_2025/archive.zip\n", "```\n", "> 📂 This contains ~2,009 labeled road damage images in YOLO format (potholes, cracks, manholes)."]),
-     md(["## 📦 Step 4 — Extract Dataset Archive\n", "\n", "Extracts `archive.zip` into `/content/pothole_data/`.\n", "This creates the raw YOLO-format dataset structure: `images/` and `labels/` subfolders."]),
-     md(["## 🗂️ Step 5 — Create Master Directory Structure\n", "\n", "Creates a unified `merged/` folder with separate `train/` and `valid/` splits.\n", "This allows merging images from multiple datasets (sachin_patel, andrew_mvd) if available."]),
-     md(["## 🔀 Step 6 — Merge Datasets (Bulletproof Search)\n", "\n", "Recursively searches all dataset folders for `.jpg` images and `.txt` YOLO labels,\n", "then copies them all into the master `merged/train/` directory.\n", "\n", "> ✅ Result: **2,009 training images** merged from road_damage_2025."]),
-     md(["## 📝 Step 7 — Write `data.yaml`\n", "\n", "Creates the YOLO dataset configuration file defining:\n", "- 3 detection classes: `['pothole', 'crack', 'manhole']`\n", "- Train and validation paths\n", "\n", "The `nc: 3` setting overrides the default YOLOv8 ImageNet classes."]),
-     md(["## 🚀 Step 8 — Train YOLOv8n (50 Epochs) & Export ONNX\n", "\n", "Trains YOLOv8 nano on the merged dataset using these hyperparameters:\n", "\n", "| Parameter | Value | Reason |\n", "|-----------|-------|--------|\n", "| `model` | yolov8n.pt | Smallest model — runs well in browser via ONNX |\n", "| `epochs` | 50 | Balanced between accuracy and training time |\n", "| `imgsz` | 640 | Standard YOLO input resolution |\n", "| `batch` | 16 | Fits T4 14GB VRAM |\n", "| `device` | 0 (GPU) | CUDA training |\n", "\n", "> ⏱️ Expected training time: **~45 minutes** on Tesla T4\n", "> 📈 Final mAP@50: ~**0.75+** after 50 epochs\n", "\n**It will then export to ONNX and download the final model.**"])
- ]
-
- nb2_mds = [
-     md(["# 🧠 ChromaDB RAG Vectorstore — Legal & Medical PDF Ingestion\n", "\n", "**Part of:** SafeVisionAI · IIT Madras Road Safety Hackathon 2026\n", "**Output:** `chroma_db/` directory → deployed to `chatbot_service/data/chroma_db/`\n", "\n", "This notebook builds the **Retrieval-Augmented Generation (RAG)** knowledge base for the SafeVisionAI chatbot.\n", "It ingests Indian legal documents (Motor Vehicles Act, MoRTH circulars) and first-aid medical PDFs,\n", "chunks them, embeds them using `sentence-transformers`, and stores them in a **ChromaDB** vector store.\n", "\n", "---\n", "### 🗂️ Source Documents\n", "| Category | Files | Source |\n", "|----------|-------|--------|\n", "| Legal | Motor Vehicles Act 2019, MoRTH 2022 | `download_legal_pdfs.py` |\n", "| Medical | First Aid guides, Emergency protocols | `download_legal_pdfs.py` |\n", "\n", "### 🔄 Pipeline\n", "```\n", "PDFs → pdfplumber chunks → sentence-transformer embeddings → ChromaDB index\n", "```\n", "\n", "> 💡 The resulting `chroma_db/` is what the chatbot queries at runtime for grounded answers."]),
-     md(["## 🔧 Step 1 — Install Dependencies\n", "\n", "Installs the full RAG stack:\n", "- `chromadb` — local vector database for semantic search\n", "- `sentence-transformers` — `all-MiniLM-L6-v2` model for text embeddings\n", "- `pdfplumber` — PDF text extraction with page layout awareness\n", "- `langchain` — document chunking utilities"]),
-     md(["## 📂 Step 2 — Upload PDF Documents\n", "\n", "Upload all legal and medical PDFs from:\n", "```\n", "chatbot_service/data/legal/\n", "chatbot_service/data/medical/\n", "```\n", "\n", "> 📄 Expected PDFs: Motor_Vehicles_Act.pdf, MoRTH_2022_Report.pdf, first_aid_guide.pdf, etc."]),
-     md(["## ✂️ Step 3 — Extract & Chunk PDF Text\n", "\n", "Uses `pdfplumber` to extract text from each PDF page,\n", "then splits into fixed-size chunks (512 tokens) with 50-token overlap.\n", "\n", "Chunking ensures the embedding model sees coherent, context-rich passages\n", "rather than arbitrarily cut sentences."]),
-     md(["## 🔢 Step 4 — Generate Embeddings\n", "\n", "Uses the `all-MiniLM-L6-v2` sentence-transformer model to convert each text chunk\n", "into a 384-dimensional embedding vector.\n", "\n", "| Model | Dimensions | Speed | Quality |\n", "|-------|-----------|-------|---------|\n", "| all-MiniLM-L6-v2 | 384 | Fast | Good for semantic QA |"]),
-     md(["## 💾 Step 5 — Build & Persist ChromaDB Index\n", "\n", "Creates a persistent ChromaDB collection and upserts all embedded chunks.\n", "The resulting `chroma_db/` folder contains the SQLite + vector index files.\n", "\n", "> 📦 Output size: ~50-100MB depending on number of PDFs ingested."]),
-     md(["## 📥 Step 6 — Download ChromaDB\n", "\n", "Zips the `chroma_db/` directory and downloads it for deployment.\n", "Place the extracted folder at: `chatbot_service/data/chroma_db/`\n", "\n", "The chatbot service auto-loads this at startup — no rebuild needed."]),
- ]
-
- nb3_mds = [
-     md(["# 🗺️ Accident EDA & Blackspot Hotspot Generator\n", "\n", "**Part of:** SafeVisionAI · IIT Madras Road Safety Hackathon 2026\n", "**Output:** `accidents_summary.json` + `blackspot_seed.csv` → seeded to the backend database\n", "\n", "This notebook processes the **Kaggle India Road Accidents dataset** (1M+ rows)\n", "to produce two key intelligence artifacts:\n", "\n", "1. **`accidents_summary.json`** — National total + top 10 states by accident count\n", "2. **`blackspot_seed.csv`** — GPS clusters with accident counts for map hotspot visualization\n", "\n", "---\n", "### 📊 Dataset\n", "- **Source:** Kaggle India Road Accidents dataset\n", "- **Size:** ~1,048,575 rows · 30+ columns\n", "- **Acquired via:** `setup_kaggle.ps1` + `scripts/data/seed_blackspots.py`\n", "\n", "### 🔄 Pipeline\n", "```\n", "Raw CSV → Normalize columns → State summary → GPS cluster → blackspot_seed.csv\n", "```"]),
-     md(["## 📂 Step 0 — Upload Accidents Dataset\n", "\n", "Upload `kaggle_india_accidents.csv` from:\n", "```\n", "chatbot_service/data/accidents/kaggle_india_accidents.csv\n", "```\n", "\n", "> ⚠️ This file is ~450MB. The Hub stores it via Git LFS."]),
-     md(["## 📖 Step 1 — Load & Normalize Dataset\n", "\n", "Reads the CSV and normalizes all column names to lowercase snake_case.\n", "Result: **1,048,575 rows** of accident records across Indian states.\n", "\n", "> 💡 The mixed-type DtypeWarning is expected for columns with mixed numeric/string data."]),
-     md(["## 📊 Step 2 — Generate National Summary JSON\n", "\n", "Auto-detects the `state` and `accident` columns using flexible column name matching,\n", "then computes:\n", "- **National total** — sum of all accident counts\n", "- **Top 10 states** — ranked by accident volume\n", "\n", "Exports `accidents_summary.json` — used by the chatbot to answer national stats queries."]),
-     md(["## 📍 Step 3 — Generate GPS Blackspot Clusters\n", "\n", "Groups accident records by rounded GPS coordinates (2 decimal places ≈ ~1km²),\n", "then counts accidents per grid cell.\n", "\n", "Result: **4,134 blackspot clusters** exported as `blackspot_seed.csv`\n", "→ This CSV is loaded by `backend/scripts/app/seed_emergency.py` to populate the PostGIS accident layer.\n", "\n", "| Column | Description |\n", "|--------|-------------|\n", "| `lat_r` | Rounded latitude (±0.01°) |\n", "| `lon_r` | Rounded longitude (±0.01°) |\n", "| `accident_count` | Number of accidents in this 1km² cell |"])
- ]
-
- nb4_mds = [
-     md(["# 🛣️ Roads & Toll Plaza Data Processing\n", "\n", "**Part of:** SafeVisionAI · IIT Madras Road Safety Hackathon 2026\n", "**Output:** `toll_plazas_lite.json` → deployed to `backend/data/roads/`\n", "\n", "This notebook processes the **NHAI Toll Plaza dataset** to produce a lightweight JSON\n", "suitable for the SafeVisionAI backend API and offline PWA map layer.\n", "\n", "---\n", "### 📊 Dataset\n", "- **Source:** NHAI Open Data / custom toll_plazas.csv\n", "- **Fields:** Name, NH Number, Latitude, Longitude\n", "- **Coverage:** All operational toll plazas on National Highways\n", "\n", "### 🔄 Pipeline\n", "```\n", "toll_plazas.csv → Select key columns → Rename headers → Export toll_plazas_lite.json\n", "```"]),
-     md(["## 📦 Step 1 — Upload & Process Toll Plaza CSV\n", "\n", "Upload `toll_plazas.csv` from:\n", "```\n", "backend/data/roads/toll_plazas.csv\n", "```\n", "\n", "The processing pipeline:\n", "1. Reads the CSV with `pandas`\n", "2. Selects only 4 essential columns: `name, id, lat, lon`\n", "3. Drops rows with missing coordinates\n", "4. Renames to human-readable headers\n", "5. Exports as `toll_plazas_lite.json`\n", "\n", "The resulting JSON is consumed by the backend `/api/roads/tolls` endpoint\n", "and the offline PWA map layer for toll overlay rendering.\n", "\n", "> 📦 Output size: ~65KB (vs 2MB+ raw CSV)"])
- ]
-
- nb5_mds = [
-     md(["# ⚡ Road Risk Scoring Model — ONNX Training Pipeline\n", "\n", "**Part of:** SafeVisionAI · IIT Madras Road Safety Hackathon 2026\n", "**Output:** `risk_model.onnx` (~21KB) → deployed to `frontend/public/models/`\n", "\n", "This notebook trains a **GradientBoosting classifier** to predict real-time road risk\n", "and exports it as ONNX for **in-browser inference** — no server call needed.\n", "\n", "---\n", "### 🧠 Model Architecture\n", "| Component | Details |\n", "|-----------|--------|\n", "| Algorithm | GradientBoostingClassifier |\n", "| Input features | 5 (road type, hour, rain, speed limit, prev accidents) |\n", "| Output | Binary: `high_risk` (0 or 1) |\n", "| Export | ONNX via `skl2onnx` |\n", "| Size | ~21KB — loads in milliseconds in browser |\n", "\n", "### 🔄 Pipeline\n", "```\n", "Synthetic data generation → GBM training → ONNX conversion → Download\n", "```\n", "\n", "> 💡 The model runs entirely client-side in the SafeVisionAI PWA using `onnxruntime-web`."]),
-     md(["## 🔧 Step 1 — Install ML Toolkit\n", "\n", "Installs the minimum stack needed for training and ONNX export:\n", "- `scikit-learn` — GradientBoostingClassifier\n", "- `skl2onnx` — converts sklearn models to ONNX format\n", "- `pandas` + `numpy` — data generation and manipulation"]),
-     md(["## 🏗️ Step 2 — Build Synthetic Training Data\n", "\n", "Generates 5,000 synthetic road sensor records matching the live app's data structure:\n", "\n", "| Feature | Values | Description |\n", "|---------|--------|-------------|\n", "| `road_type` | 0-3 | NH=0, SH=1, MDR=2, VR=3 |\n", "| `hour` | 0-23 | Hour of day |\n", "| `is_rain` | 0/1 | Weather condition |\n", "| `speed_limit` | 40/60/80/100 | Posted speed (km/h) |\n", "| `prev_accidents` | Poisson(2) | Historical accident count |\n", "\n", "**Label logic:** `high_risk = 1` when: Night hours (10pm–4am) + National/State Highway + Raining\n", "This reflects real-world patterns from the India accident dataset."]),
-     md(["## 🎯 Step 3 — Train GradientBoosting Classifier\n", "\n", "Trains a GBM with 50 estimators and max depth 4:\n", "- **Fast:** <10 seconds on CPU\n", "- **Accurate:** Handles non-linear risk patterns well\n", "- **Tiny:** Converts to 21KB ONNX — ideal for edge/PWA deployment"]),
-     md(["## 📦 Step 4 — Export to ONNX & Download\n", "\n", "Converts the trained sklearn model to ONNX format using `skl2onnx`:\n", "- **Input:** `FloatTensorType([None, 5])` — batch of 5-feature vectors\n", "- **Output:** Risk probability + binary class label\n", "\n", "Download `risk_model.onnx` and place at:\n", "```\n", "frontend/public/models/risk_model.onnx\n", "```\n", "\n", "The Next.js PWA loads this at startup and runs inference on each map segment click.\n", "\n", "> ✅ Final output: **~21KB** ONNX model — ready for browser deployment"])
- ]
-
- # Mapping structure:
- # nb_path: { code_cell_index: list of markdowns to insert BEFORE it }
- config = {
-     'YOLOv8_Pothole_Detector_Training_frontend_public_models_1.ipynb': {
-         0: [nb1_mds[0]],  # Title
-         1: [nb1_mds[1]],  # Step 1 (pip install)
-         2: [nb1_mds[2]],  # Step 2 (import os)
-         3: [nb1_mds[3]],  # Step 3 (upload)
-         4: [nb1_mds[4]],  # Step 4 (extract)
-         5: [nb1_mds[5]],  # Step 5 (master directory)
-         6: [nb1_mds[6]],  # Step 6 (merge datasets)
-         7: [nb1_mds[7]],  # Step 7 (data.yaml)
-         8: [nb1_mds[8]]   # Step 8 (train & export)
-     },
-     'ChromaDB_RAG_Vectorstore_Build_chatbot_service_data_chroma_db_2.ipynb': {
-         0: [nb2_mds[0]],  # Title
-         1: [nb2_mds[1]],  # Step 1: Install tools
-         4: [nb2_mds[2]],  # Step 2: Upload Files (cell 3.5 in code)
-         5: [nb2_mds[3]],  # Step 3: Helper functions chunking (cell 4 in code)
-         6: [nb2_mds[4]],  # Step 4: Parse & Embed legal (cell 5 in code)
-         7: [nb2_mds[5]],  # Step 5: Build & persist index (cell 6 in code)
-         8: [nb2_mds[6]]   # Step 6: Download Database
-     },
-     'Accident_EDA_&_Hotspot_Generator_chatbot_service_data_accidents_3.ipynb': {
-         0: [nb3_mds[0], nb3_mds[1]],  # Title + Step 0: Upload
-         1: [nb3_mds[2]],  # Step 1: Load
-         2: [nb3_mds[3]],  # Step 2: Summary
-         3: [nb3_mds[4]]   # Step 3: GPS
-     },
-     'Roads_Data_Processing_backend_data_4.ipynb': {
-         0: [nb4_mds[0], nb4_mds[1]]  # Title + Step 1
-     },
-     'Risk_Model_ONNX_Training_frontend_public_models_5.ipynb': {
-         0: [nb5_mds[0], nb5_mds[1]],  # Title + Install ML
-         1: [nb5_mds[2]],  # Step 2: Build synthetic data
-         2: [nb5_mds[3]],  # Step 3: Train GradientBoosting
-         3: [nb5_mds[4]]   # Step 4: Export ONNX
-     }
- }
-
- import os
- def reset_and_inject():
-     # Fix both repositories
-     repos = [
-         r"C:\Hackathons\IITM\SafeVisionAI-Dataset-Hub\notebooks",
-         r"C:\Hackathons\IITM\SafeVisionAI\notebooks"
-     ]
-
-     for repo in repos:
-         print(f"\nProcessing {repo}")
-         for nb_name, mapping in config.items():
-             path = os.path.join(repo, nb_name)
-             if not os.path.exists(path):
-                 print(f"[SKIP] {nb_name}")
-                 continue
-
-             with open(path, 'r', encoding='utf-8') as f:
-                 nb = json.load(f)
-
-             # Start fresh with only code cells (wipe existing markdowns)
-             code_cells = [c for c in nb['cells'] if c['cell_type'] == 'code']
-
-             new_cells = []
-             for idx, c_cell in enumerate(code_cells):
-                 # Before adding the code cell, add any markdowns mapped to this index
-                 if idx in mapping:
-                     for md_obj in mapping[idx]:
-                         new_cells.append(md_obj)
-                 new_cells.append(c_cell)
-
-             # Markdowns mapped to an index beyond the code-cell count are not
-             # handled here; the current config never needs that.
-
-             nb['cells'] = new_cells
-             with open(path, 'w', encoding='utf-8') as f:
-                 json.dump(nb, f, indent=2, ensure_ascii=False)
-
-             print(f"[OK] {nb_name} -> now has {len(new_cells)} total cells")
-
- if __name__ == "__main__":
-     reset_and_inject()
-     print("\n[DONE] All cells perfectly mapped, zero duplicates.")
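
The `md()` helper above builds raw markdown-cell dicts by hand. A hedged alternative using `nbformat`'s v4 constructors, which also fill in any schema fields newer notebook versions require, might look like this (illustrative, not the removed script's actual approach):

```python
# Sketch: building the same markdown cell with nbformat's v4 API
# instead of a raw dict (assumes: pip install nbformat).
from nbformat.v4 import new_markdown_cell

cell = new_markdown_cell(source="## Step 1: Environment Setup")
# Produces {"cell_type": "markdown", "metadata": {}, "source": ...}
# plus any required schema fields (e.g. a cell id in newer nbformat versions).
print(cell["cell_type"])  # "markdown"
```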
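The RAG notebook's Step 3 markdown describes splitting PDF text into 512-unit chunks with 50-unit overlap. A minimal sketch of that step with `langchain_text_splitters` (the notebook's own helper code is not shown in the dump, so names here are assumptions):

```python
# Sketch of the chunking described in the RAG notebook markdown.
# Note: RecursiveCharacterTextSplitter measures chunk_size in characters
# by default, while the markdown speaks of tokens; 512/50 come from the text above.
from langchain_text_splitters import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)
page_text = "..."  # text extracted from one PDF page (e.g. via pdfplumber)
chunks = splitter.split_text(page_text)
print(len(chunks))
```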
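Likewise, the accident-EDA markdown (Step 3) describes rounding GPS coordinates to two decimal places (roughly 1 km cells) and counting accidents per cell. A plausible pandas reconstruction, with the `lat`/`lon` column names assumed (the real notebook auto-detects them):

```python
# Hypothetical reconstruction of the blackspot clustering step.
import pandas as pd

df = pd.read_csv("kaggle_india_accidents.csv")
df["lat_r"] = df["lat"].round(2)  # 0.01 degree is roughly a 1 km grid cell
df["lon_r"] = df["lon"].round(2)
clusters = (
    df.groupby(["lat_r", "lon_r"])
    .size()
    .reset_index(name="accident_count")
    .sort_values("accident_count", ascending=False)
)
clusters.to_csv("blackspot_seed.csv", index=False)
```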
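Finally, the risk-model markdown (Step 4) names the exact export signature: `skl2onnx` with `FloatTensorType([None, 5])`. A self-contained sketch of that conversion (the synthetic data here is an illustrative stand-in, not the notebook's generator):

```python
# Sketch of the ONNX export step described in the risk-model markdown.
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType

# Illustrative stand-in for the notebook's 5-feature synthetic dataset
X = np.random.rand(1000, 5).astype(np.float32)
y = (X[:, 1] > 0.7).astype(int)

model = GradientBoostingClassifier(n_estimators=50, max_depth=4).fit(X, y)

# Batch of 5-feature float vectors, as stated in the markdown above
onnx_model = convert_sklearn(model, initial_types=[("input", FloatTensorType([None, 5]))])
with open("risk_model.onnx", "wb") as f:
    f.write(onnx_model.SerializeToString())
```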