Bappadala Rohith Kumar Naidu committed
Commit · 81e2e9c
Parent(s): ac89bce
fix(notebooks): remap markdown documentation cells to precisely match target code cells
- notebooks/Accident_EDA_&_Hotspot_Generator_chatbot_service_data_accidents_3.ipynb +7 -7
- notebooks/ChromaDB_RAG_Vectorstore_Build_chatbot_service_data_chroma_db_2.ipynb +59 -59
- notebooks/Risk_Model_ONNX_Training_frontend_public_models_5.ipynb +4 -4
- notebooks/Roads_Data_Processing_backend_data_4.ipynb +4 -4
- notebooks/YOLOv8_Pothole_Detector_Training_frontend_public_models_1.ipynb +51 -50
- notebooks/cell_dump.txt +40 -0
- notebooks/check_cells.py +17 -0
- notebooks/smart_inject.py +132 -0
notebooks/Accident_EDA_&_Hotspot_Generator_chatbot_service_data_accidents_3.ipynb
CHANGED
|
@@ -20,10 +20,10 @@
|
|
| 20 |
"source": [
|
| 21 |
"# πΊοΈ Accident EDA & Blackspot Hotspot Generator\n",
|
| 22 |
"\n",
|
| 23 |
-
"**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026
|
| 24 |
"**Output:** `accidents_summary.json` + `blackspot_seed.csv` β seeded to the backend database\n",
|
| 25 |
"\n",
|
| 26 |
-
"This notebook processes the **Kaggle India Road Accidents dataset** (1M+ rows)
|
| 27 |
"to produce two key intelligence artifacts:\n",
|
| 28 |
"\n",
|
| 29 |
"1. **`accidents_summary.json`** β National total + top 10 states by accident count\n",
|
|
@@ -47,7 +47,7 @@
|
|
| 47 |
"source": [
|
| 48 |
"## π Step 0 β Upload Accidents Dataset\n",
|
| 49 |
"\n",
|
| 50 |
-
"Upload `kaggle_india_accidents.csv` from:
|
| 51 |
"```\n",
|
| 52 |
"chatbot_service/data/accidents/kaggle_india_accidents.csv\n",
|
| 53 |
"```\n",
|
|
@@ -293,7 +293,7 @@
|
|
| 293 |
"source": [
|
| 294 |
"## π Step 1 β Load & Normalize Dataset\n",
|
| 295 |
"\n",
|
| 296 |
-
"Reads the CSV and normalizes all column names to lowercase snake_case.
|
| 297 |
"Result: **1,048,575 rows** of accident records across Indian states.\n",
|
| 298 |
"\n",
|
| 299 |
"> π‘ The mixed-type DtypeWarning is expected for columns with mixed numeric/string data."
|
|
@@ -340,7 +340,7 @@
|
|
| 340 |
"source": [
|
| 341 |
"## π Step 2 β Generate National Summary JSON\n",
|
| 342 |
"\n",
|
| 343 |
-
"Auto-detects the `state` and `accident` columns using flexible column name matching,
|
| 344 |
"then computes:\n",
|
| 345 |
"- **National total** β sum of all accident counts\n",
|
| 346 |
"- **Top 10 states** β ranked by accident volume\n",
|
|
@@ -448,10 +448,10 @@
|
|
| 448 |
"source": [
|
| 449 |
"## π Step 3 β Generate GPS Blackspot Clusters\n",
|
| 450 |
"\n",
|
| 451 |
-
"Groups accident records by rounded GPS coordinates (2 decimal places β ~1kmΒ²),
|
| 452 |
"then counts accidents per grid cell.\n",
|
| 453 |
"\n",
|
| 454 |
-
"Result: **4,134 blackspot clusters** exported as `blackspot_seed.csv`
|
| 455 |
"β This CSV is loaded by `backend/scripts/app/seed_emergency.py` to populate the PostGIS accident layer.\n",
|
| 456 |
"\n",
|
| 457 |
"| Column | Description |\n",
|
|
|
|
| 20 |
"source": [
|
| 21 |
"# πΊοΈ Accident EDA & Blackspot Hotspot Generator\n",
|
| 22 |
"\n",
|
| 23 |
+
"**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026\n",
|
| 24 |
"**Output:** `accidents_summary.json` + `blackspot_seed.csv` β seeded to the backend database\n",
|
| 25 |
"\n",
|
| 26 |
+
"This notebook processes the **Kaggle India Road Accidents dataset** (1M+ rows)\n",
|
| 27 |
"to produce two key intelligence artifacts:\n",
|
| 28 |
"\n",
|
| 29 |
"1. **`accidents_summary.json`** β National total + top 10 states by accident count\n",
|
|
|
|
| 47 |
"source": [
|
| 48 |
"## π Step 0 β Upload Accidents Dataset\n",
|
| 49 |
"\n",
|
| 50 |
+
"Upload `kaggle_india_accidents.csv` from:\n",
|
| 51 |
"```\n",
|
| 52 |
"chatbot_service/data/accidents/kaggle_india_accidents.csv\n",
|
| 53 |
"```\n",
|
|
|
|
| 293 |
"source": [
|
| 294 |
"## π Step 1 β Load & Normalize Dataset\n",
|
| 295 |
"\n",
|
| 296 |
+
"Reads the CSV and normalizes all column names to lowercase snake_case.\n",
|
| 297 |
"Result: **1,048,575 rows** of accident records across Indian states.\n",
|
| 298 |
"\n",
|
| 299 |
"> π‘ The mixed-type DtypeWarning is expected for columns with mixed numeric/string data."
|
|
|
|
| 340 |
"source": [
|
| 341 |
"## π Step 2 β Generate National Summary JSON\n",
|
| 342 |
"\n",
|
| 343 |
+
"Auto-detects the `state` and `accident` columns using flexible column name matching,\n",
|
| 344 |
"then computes:\n",
|
| 345 |
"- **National total** β sum of all accident counts\n",
|
| 346 |
"- **Top 10 states** β ranked by accident volume\n",
|
|
|
|
| 448 |
"source": [
|
| 449 |
"## π Step 3 β Generate GPS Blackspot Clusters\n",
|
| 450 |
"\n",
|
| 451 |
+
"Groups accident records by rounded GPS coordinates (2 decimal places β ~1kmΒ²),\n",
|
| 452 |
"then counts accidents per grid cell.\n",
|
| 453 |
"\n",
|
| 454 |
+
"Result: **4,134 blackspot clusters** exported as `blackspot_seed.csv`\n",
|
| 455 |
"β This CSV is loaded by `backend/scripts/app/seed_emergency.py` to populate the PostGIS accident layer.\n",
|
| 456 |
"\n",
|
| 457 |
"| Column | Description |\n",
|
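The Step 3 clustering documented above amounts to a rounded-coordinate groupby. A minimal sketch, assuming `latitude`/`longitude` column names for illustration (the notebook auto-detects the lat/lon columns from the CSV header):

```
import pandas as pd

# Assumed column names; the notebook detects lat/lon columns dynamically.
df = pd.read_csv("kaggle_india_accidents.csv", low_memory=False)

# Round to 2 decimal places (~1 km grid cell) and count accidents per cell.
blackspots = (
    df.assign(lat_r=df["latitude"].round(2), lon_r=df["longitude"].round(2))
      .groupby(["lat_r", "lon_r"])
      .size()
      .reset_index(name="accident_count")
      .sort_values("accident_count", ascending=False)
)
blackspots.to_csv("blackspot_seed.csv", index=False)
```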
notebooks/ChromaDB_RAG_Vectorstore_Build_chatbot_service_data_chroma_db_2.ipynb
CHANGED
|
@@ -7890,11 +7890,11 @@
|
|
| 7890 |
"source": [
|
| 7891 |
"# π§ ChromaDB RAG Vectorstore β Legal & Medical PDF Ingestion\n",
|
| 7892 |
"\n",
|
| 7893 |
-
"**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026
|
| 7894 |
"**Output:** `chroma_db/` directory β deployed to `chatbot_service/data/chroma_db/`\n",
|
| 7895 |
"\n",
|
| 7896 |
-
"This notebook builds the **Retrieval-Augmented Generation (RAG)** knowledge base for the SafeVisionAI chatbot.
|
| 7897 |
-
"It ingests Indian legal documents (Motor Vehicles Act, MoRTH circulars) and first-aid medical PDFs,
|
| 7898 |
"chunks them, embeds them using `sentence-transformers`, and stores them in a **ChromaDB** vector store.\n",
|
| 7899 |
"\n",
|
| 7900 |
"---\n",
|
|
@@ -7912,19 +7912,6 @@
|
|
| 7912 |
"> π‘ The resulting `chroma_db/` is what the chatbot queries at runtime for grounded answers."
|
| 7913 |
]
|
| 7914 |
},
|
| 7915 |
-
{
|
| 7916 |
-
"cell_type": "markdown",
|
| 7917 |
-
"metadata": {},
|
| 7918 |
-
"source": [
|
| 7919 |
-
"## π§ Step 1 β Install Dependencies\n",
|
| 7920 |
-
"\n",
|
| 7921 |
-
"Installs the full RAG stack:\n",
|
| 7922 |
-
"- `chromadb` β local vector database for semantic search\n",
|
| 7923 |
-
"- `sentence-transformers` β `all-MiniLM-L6-v2` model for text embeddings\n",
|
| 7924 |
-
"- `pdfplumber` β PDF text extraction with page layout awareness\n",
|
| 7925 |
-
"- `langchain` β document chunking utilities"
|
| 7926 |
-
]
|
| 7927 |
-
},
|
| 7928 |
{
|
| 7929 |
"cell_type": "code",
|
| 7930 |
"source": [
|
|
@@ -7978,15 +7965,13 @@
|
|
| 7978 |
"cell_type": "markdown",
|
| 7979 |
"metadata": {},
|
| 7980 |
"source": [
|
| 7981 |
-
"##
|
| 7982 |
-
"\n",
|
| 7983 |
-
"Upload all legal and medical PDFs from: \n",
|
| 7984 |
-
"```\n",
|
| 7985 |
-
"chatbot_service/data/legal/\n",
|
| 7986 |
-
"chatbot_service/data/medical/\n",
|
| 7987 |
-
"```\n",
|
| 7988 |
"\n",
|
| 7989 |
-
"
|
| 7990 |
]
|
| 7991 |
},
|
| 7992 |
{
|
|
@@ -8015,19 +8000,6 @@
|
|
| 8015 |
}
|
| 8016 |
]
|
| 8017 |
},
|
| 8018 |
-
{
|
| 8019 |
-
"cell_type": "markdown",
|
| 8020 |
-
"metadata": {},
|
| 8021 |
-
"source": [
|
| 8022 |
-
"## βοΈ Step 3 β Extract & Chunk PDF Text\n",
|
| 8023 |
-
"\n",
|
| 8024 |
-
"Uses `pdfplumber` to extract text from each PDF page, \n",
|
| 8025 |
-
"then splits into fixed-size chunks (512 tokens) with 50-token overlap.\n",
|
| 8026 |
-
"\n",
|
| 8027 |
-
"Chunking ensures the embedding model sees coherent, context-rich passages \n",
|
| 8028 |
-
"rather than arbitrarily cut sentences."
|
| 8029 |
-
]
|
| 8030 |
-
},
|
| 8031 |
{
|
| 8032 |
"cell_type": "code",
|
| 8033 |
"source": [
|
|
@@ -8700,20 +8672,6 @@
|
|
| 8700 |
}
|
| 8701 |
]
|
| 8702 |
},
|
| 8703 |
-
{
|
| 8704 |
-
"cell_type": "markdown",
|
| 8705 |
-
"metadata": {},
|
| 8706 |
-
"source": [
|
| 8707 |
-
"## π’ Step 4 β Generate Embeddings\n",
|
| 8708 |
-
"\n",
|
| 8709 |
-
"Uses the `all-MiniLM-L6-v2` sentence-transformer model to convert each text chunk \n",
|
| 8710 |
-
"into a 384-dimensional embedding vector.\n",
|
| 8711 |
-
"\n",
|
| 8712 |
-
"| Model | Dimensions | Speed | Quality |\n",
|
| 8713 |
-
"|-------|-----------|-------|---------|\n",
|
| 8714 |
-
"| all-MiniLM-L6-v2 | 384 | Fast | Good for semantic QA |"
|
| 8715 |
-
]
|
| 8716 |
-
},
|
| 8717 |
{
|
| 8718 |
"cell_type": "code",
|
| 8719 |
"source": [
|
|
@@ -8755,12 +8713,15 @@
|
|
| 8755 |
"cell_type": "markdown",
|
| 8756 |
"metadata": {},
|
| 8757 |
"source": [
|
| 8758 |
-
"##
|
| 8759 |
"\n",
|
| 8760 |
-
"
|
| 8761 |
-
"
|
| 8762 |
"\n",
|
| 8763 |
-
">
|
| 8764 |
]
|
| 8765 |
},
|
| 8766 |
{
|
|
@@ -9015,12 +8976,13 @@
|
|
| 9015 |
"cell_type": "markdown",
|
| 9016 |
"metadata": {},
|
| 9017 |
"source": [
|
| 9018 |
-
"##
|
| 9019 |
"\n",
|
| 9020 |
-
"
|
| 9021 |
-
"
|
| 9022 |
"\n",
|
| 9023 |
-
"
|
|
|
|
| 9024 |
]
|
| 9025 |
},
|
| 9026 |
{
|
|
@@ -9051,6 +9013,20 @@
|
|
| 9051 |
"execution_count": null,
|
| 9052 |
"outputs": []
|
| 9053 |
},
|
| 9054 |
{
|
| 9055 |
"cell_type": "code",
|
| 9056 |
"source": [
|
|
@@ -9077,6 +9053,18 @@
|
|
| 9077 |
}
|
| 9078 |
]
|
| 9079 |
},
|
| 9080 |
{
|
| 9081 |
"cell_type": "code",
|
| 9082 |
"source": [
|
|
@@ -9125,6 +9113,18 @@
|
|
| 9125 |
}
|
| 9126 |
]
|
| 9127 |
},
|
| 9128 |
{
|
| 9129 |
"cell_type": "code",
|
| 9130 |
"source": [
|
|
|
|
| 7890 |
"source": [
|
| 7891 |
"# π§ ChromaDB RAG Vectorstore β Legal & Medical PDF Ingestion\n",
|
| 7892 |
"\n",
|
| 7893 |
+
"**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026\n",
|
| 7894 |
"**Output:** `chroma_db/` directory β deployed to `chatbot_service/data/chroma_db/`\n",
|
| 7895 |
"\n",
|
| 7896 |
+
"This notebook builds the **Retrieval-Augmented Generation (RAG)** knowledge base for the SafeVisionAI chatbot.\n",
|
| 7897 |
+
"It ingests Indian legal documents (Motor Vehicles Act, MoRTH circulars) and first-aid medical PDFs,\n",
|
| 7898 |
"chunks them, embeds them using `sentence-transformers`, and stores them in a **ChromaDB** vector store.\n",
|
| 7899 |
"\n",
|
| 7900 |
"---\n",
|
|
|
|
| 7912 |
"> π‘ The resulting `chroma_db/` is what the chatbot queries at runtime for grounded answers."
|
| 7913 |
]
|
| 7914 |
},
|
| 7915 |
{
|
| 7916 |
"cell_type": "code",
|
| 7917 |
"source": [
|
|
|
|
| 7965 |
"cell_type": "markdown",
|
| 7966 |
"metadata": {},
|
| 7967 |
"source": [
|
| 7968 |
+
"## π§ Step 1 β Install Dependencies\n",
|
| 7969 |
"\n",
|
| 7970 |
+
"Installs the full RAG stack:\n",
|
| 7971 |
+
"- `chromadb` β local vector database for semantic search\n",
|
| 7972 |
+
"- `sentence-transformers` β `all-MiniLM-L6-v2` model for text embeddings\n",
|
| 7973 |
+
"- `pdfplumber` β PDF text extraction with page layout awareness\n",
|
| 7974 |
+
"- `langchain` β document chunking utilities"
|
| 7975 |
]
|
| 7976 |
},
|
| 7977 |
{
|
|
|
|
| 8000 |
}
|
| 8001 |
]
|
| 8002 |
},
|
| 8003 |
{
|
| 8004 |
"cell_type": "code",
|
| 8005 |
"source": [
|
|
|
|
| 8672 |
}
|
| 8673 |
]
|
| 8674 |
},
|
| 8675 |
{
|
| 8676 |
"cell_type": "code",
|
| 8677 |
"source": [
|
|
|
|
| 8713 |
"cell_type": "markdown",
|
| 8714 |
"metadata": {},
|
| 8715 |
"source": [
|
| 8716 |
+
"## π Step 2 β Upload PDF Documents\n",
|
| 8717 |
"\n",
|
| 8718 |
+
"Upload all legal and medical PDFs from:\n",
|
| 8719 |
+
"```\n",
|
| 8720 |
+
"chatbot_service/data/legal/\n",
|
| 8721 |
+
"chatbot_service/data/medical/\n",
|
| 8722 |
+
"```\n",
|
| 8723 |
"\n",
|
| 8724 |
+
"> π Expected PDFs: Motor_Vehicles_Act.pdf, MoRTH_2022_Report.pdf, first_aid_guide.pdf, etc."
|
| 8725 |
]
|
| 8726 |
},
|
| 8727 |
{
|
|
|
|
| 8976 |
"cell_type": "markdown",
|
| 8977 |
"metadata": {},
|
| 8978 |
"source": [
|
| 8979 |
+
"## βοΈ Step 3 β Extract & Chunk PDF Text\n",
|
| 8980 |
"\n",
|
| 8981 |
+
"Uses `pdfplumber` to extract text from each PDF page,\n",
|
| 8982 |
+
"then splits into fixed-size chunks (512 tokens) with 50-token overlap.\n",
|
| 8983 |
"\n",
|
| 8984 |
+
"Chunking ensures the embedding model sees coherent, context-rich passages\n",
|
| 8985 |
+
"rather than arbitrarily cut sentences."
|
| 8986 |
]
|
| 8987 |
},
|
| 8988 |
{
|
|
|
|
| 9013 |
"execution_count": null,
|
| 9014 |
"outputs": []
|
| 9015 |
},
|
| 9016 |
+
{
|
| 9017 |
+
"cell_type": "markdown",
|
| 9018 |
+
"metadata": {},
|
| 9019 |
+
"source": [
|
| 9020 |
+
"## π’ Step 4 β Generate Embeddings\n",
|
| 9021 |
+
"\n",
|
| 9022 |
+
"Uses the `all-MiniLM-L6-v2` sentence-transformer model to convert each text chunk\n",
|
| 9023 |
+
"into a 384-dimensional embedding vector.\n",
|
| 9024 |
+
"\n",
|
| 9025 |
+
"| Model | Dimensions | Speed | Quality |\n",
|
| 9026 |
+
"|-------|-----------|-------|---------|\n",
|
| 9027 |
+
"| all-MiniLM-L6-v2 | 384 | Fast | Good for semantic QA |"
|
| 9028 |
+
]
|
| 9029 |
+
},
|
| 9030 |
{
|
| 9031 |
"cell_type": "code",
|
| 9032 |
"source": [
|
|
|
|
| 9053 |
}
|
| 9054 |
]
|
| 9055 |
},
|
| 9056 |
+
{
|
| 9057 |
+
"cell_type": "markdown",
|
| 9058 |
+
"metadata": {},
|
| 9059 |
+
"source": [
|
| 9060 |
+
"## πΎ Step 5 β Build & Persist ChromaDB Index\n",
|
| 9061 |
+
"\n",
|
| 9062 |
+
"Creates a persistent ChromaDB collection and upserts all embedded chunks.\n",
|
| 9063 |
+
"The resulting `chroma_db/` folder contains the SQLite + vector index files.\n",
|
| 9064 |
+
"\n",
|
| 9065 |
+
"> π¦ Output size: ~50-100MB depending on number of PDFs ingested."
|
| 9066 |
+
]
|
| 9067 |
+
},
|
| 9068 |
{
|
| 9069 |
"cell_type": "code",
|
| 9070 |
"source": [
|
|
|
|
| 9113 |
}
|
| 9114 |
]
|
| 9115 |
},
|
| 9116 |
+
{
|
| 9117 |
+
"cell_type": "markdown",
|
| 9118 |
+
"metadata": {},
|
| 9119 |
+
"source": [
|
| 9120 |
+
"## π₯ Step 6 β Download ChromaDB\n",
|
| 9121 |
+
"\n",
|
| 9122 |
+
"Zips the `chroma_db/` directory and downloads it for deployment.\n",
|
| 9123 |
+
"Place the extracted folder at: `chatbot_service/data/chroma_db/`\n",
|
| 9124 |
+
"\n",
|
| 9125 |
+
"The chatbot service auto-loads this at startup β no rebuild needed."
|
| 9126 |
+
]
|
| 9127 |
+
},
|
| 9128 |
{
|
| 9129 |
"cell_type": "code",
|
| 9130 |
"source": [
|
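The Steps 3-5 documented above follow a chunk, embed, and upsert flow. A minimal single-PDF sketch under assumptions: the file path and collection name are illustrative, the docs above mention `pdfplumber` while the notebook's own cells use `PyPDFLoader`, and this splitter is character-based rather than token-based. The notebook additionally maintains separate legal and multilingual collections.

```
import chromadb
import pdfplumber
from langchain_text_splitters import RecursiveCharacterTextSplitter
from sentence_transformers import SentenceTransformer

pdf_path = "Motor_Vehicles_Act.pdf"  # hypothetical input file

# Extract page text (Step 3).
with pdfplumber.open(pdf_path) as pdf:
    text = "\n".join(page.extract_text() or "" for page in pdf.pages)

# Split into overlapping chunks: 512-unit chunks with 50 overlap, as described.
splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)
chunks = splitter.split_text(text)

# 384-dimensional embeddings with all-MiniLM-L6-v2 (Step 4).
embedder = SentenceTransformer("all-MiniLM-L6-v2")
embeddings = embedder.encode(chunks).tolist()

# Persist into a local ChromaDB collection (Step 5).
client = chromadb.PersistentClient(path="chroma_db")
collection = client.get_or_create_collection("legal_docs")  # assumed collection name
collection.upsert(
    ids=[f"{pdf_path}-{i}" for i in range(len(chunks))],
    documents=chunks,
    embeddings=embeddings,
    metadatas=[{"source": pdf_path}] * len(chunks),
)
```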
notebooks/Risk_Model_ONNX_Training_frontend_public_models_5.ipynb
CHANGED
|
@@ -20,10 +20,10 @@
|
|
| 20 |
"source": [
|
| 21 |
"# β‘ Road Risk Scoring Model β ONNX Training Pipeline\n",
|
| 22 |
"\n",
|
| 23 |
-
"**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026
|
| 24 |
"**Output:** `risk_model.onnx` (~21KB) β deployed to `frontend/public/models/`\n",
|
| 25 |
"\n",
|
| 26 |
-
"This notebook trains a **GradientBoosting classifier** to predict real-time road risk
|
| 27 |
"and exports it as ONNX for **in-browser inference** β no server call needed.\n",
|
| 28 |
"\n",
|
| 29 |
"---\n",
|
|
@@ -100,7 +100,7 @@
|
|
| 100 |
"| `speed_limit` | 40/60/80/100 | Posted speed (km/h) |\n",
|
| 101 |
"| `prev_accidents` | Poisson(2) | Historical accident count |\n",
|
| 102 |
"\n",
|
| 103 |
-
"**Label logic:** `high_risk = 1` when: Night hours (10pmβ4am) + National/State Highway + Raining
|
| 104 |
"This reflects real-world patterns from the India accident dataset."
|
| 105 |
]
|
| 106 |
},
|
|
@@ -195,7 +195,7 @@
|
|
| 195 |
"- **Input:** `FloatTensorType([None, 5])` β batch of 5-feature vectors\n",
|
| 196 |
"- **Output:** Risk probability + binary class label\n",
|
| 197 |
"\n",
|
| 198 |
-
"Download `risk_model.onnx` and place at:
|
| 199 |
"```\n",
|
| 200 |
"frontend/public/models/risk_model.onnx\n",
|
| 201 |
"```\n",
|
|
|
|
| 20 |
"source": [
|
| 21 |
"# β‘ Road Risk Scoring Model β ONNX Training Pipeline\n",
|
| 22 |
"\n",
|
| 23 |
+
"**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026\n",
|
| 24 |
"**Output:** `risk_model.onnx` (~21KB) β deployed to `frontend/public/models/`\n",
|
| 25 |
"\n",
|
| 26 |
+
"This notebook trains a **GradientBoosting classifier** to predict real-time road risk\n",
|
| 27 |
"and exports it as ONNX for **in-browser inference** β no server call needed.\n",
|
| 28 |
"\n",
|
| 29 |
"---\n",
|
|
|
|
| 100 |
"| `speed_limit` | 40/60/80/100 | Posted speed (km/h) |\n",
|
| 101 |
"| `prev_accidents` | Poisson(2) | Historical accident count |\n",
|
| 102 |
"\n",
|
| 103 |
+
"**Label logic:** `high_risk = 1` when: Night hours (10pmβ4am) + National/State Highway + Raining\n",
|
| 104 |
"This reflects real-world patterns from the India accident dataset."
|
| 105 |
]
|
| 106 |
},
|
|
|
|
| 195 |
"- **Input:** `FloatTensorType([None, 5])` β batch of 5-feature vectors\n",
|
| 196 |
"- **Output:** Risk probability + binary class label\n",
|
| 197 |
"\n",
|
| 198 |
+
"Download `risk_model.onnx` and place at:\n",
|
| 199 |
"```\n",
|
| 200 |
"frontend/public/models/risk_model.onnx\n",
|
| 201 |
"```\n",
|
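The synthetic-data, training, and export steps described above fit in a short script. A sketch following the documented feature table and label logic; the sample count, random seed, and exact threshold values are assumptions for illustration:

```
import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType

rng = np.random.default_rng(42)
n = 5_000  # sample count is an assumption

# Synthetic features matching the table above (road_type: NH=0, SH=1, MDR=2, VR=3).
X = pd.DataFrame({
    "road_type": rng.integers(0, 4, n),
    "hour": rng.integers(0, 24, n),
    "is_rain": rng.integers(0, 2, n),
    "speed_limit": rng.choice([40, 60, 80, 100], n),
    "prev_accidents": rng.poisson(2, n),
})

# Label logic from the docs: night hours + National/State Highway + rain => high risk.
night = (X["hour"] >= 22) | (X["hour"] <= 4)
y = (night & (X["road_type"] <= 1) & (X["is_rain"] == 1)).astype(int)

model = GradientBoostingClassifier(n_estimators=50, max_depth=4).fit(X, y)

# Convert to ONNX with a [None, 5] float input, as described in Step 4.
onnx_model = convert_sklearn(model, initial_types=[("input", FloatTensorType([None, 5]))])
with open("risk_model.onnx", "wb") as f:
    f.write(onnx_model.SerializeToString())
```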
notebooks/Roads_Data_Processing_backend_data_4.ipynb
CHANGED
|
@@ -20,10 +20,10 @@
|
|
| 20 |
"source": [
|
| 21 |
"# π£οΈ Roads & Toll Plaza Data Processing\n",
|
| 22 |
"\n",
|
| 23 |
-
"**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026
|
| 24 |
"**Output:** `toll_plazas_lite.json` β deployed to `backend/data/roads/`\n",
|
| 25 |
"\n",
|
| 26 |
-
"This notebook processes the **NHAI Toll Plaza dataset** to produce a lightweight JSON
|
| 27 |
"suitable for the SafeVisionAI backend API and offline PWA map layer.\n",
|
| 28 |
"\n",
|
| 29 |
"---\n",
|
|
@@ -44,7 +44,7 @@
|
|
| 44 |
"source": [
|
| 45 |
"## π¦ Step 1 β Upload & Process Toll Plaza CSV\n",
|
| 46 |
"\n",
|
| 47 |
-
"Upload `toll_plazas.csv` from:
|
| 48 |
"```\n",
|
| 49 |
"backend/data/roads/toll_plazas.csv\n",
|
| 50 |
"```\n",
|
|
@@ -56,7 +56,7 @@
|
|
| 56 |
"4. Renames to human-readable headers\n",
|
| 57 |
"5. Exports as `toll_plazas_lite.json`\n",
|
| 58 |
"\n",
|
| 59 |
-
"The resulting JSON is consumed by the backend `/api/roads/tolls` endpoint
|
| 60 |
"and the offline PWA map layer for toll overlay rendering.\n",
|
| 61 |
"\n",
|
| 62 |
"> π¦ Output size: ~65KB (vs 2MB+ raw CSV)"
|
|
|
|
| 20 |
"source": [
|
| 21 |
"# π£οΈ Roads & Toll Plaza Data Processing\n",
|
| 22 |
"\n",
|
| 23 |
+
"**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026\n",
|
| 24 |
"**Output:** `toll_plazas_lite.json` β deployed to `backend/data/roads/`\n",
|
| 25 |
"\n",
|
| 26 |
+
"This notebook processes the **NHAI Toll Plaza dataset** to produce a lightweight JSON\n",
|
| 27 |
"suitable for the SafeVisionAI backend API and offline PWA map layer.\n",
|
| 28 |
"\n",
|
| 29 |
"---\n",
|
|
|
|
| 44 |
"source": [
|
| 45 |
"## π¦ Step 1 β Upload & Process Toll Plaza CSV\n",
|
| 46 |
"\n",
|
| 47 |
+
"Upload `toll_plazas.csv` from:\n",
|
| 48 |
"```\n",
|
| 49 |
"backend/data/roads/toll_plazas.csv\n",
|
| 50 |
"```\n",
|
|
|
|
| 56 |
"4. Renames to human-readable headers\n",
|
| 57 |
"5. Exports as `toll_plazas_lite.json`\n",
|
| 58 |
"\n",
|
| 59 |
+
"The resulting JSON is consumed by the backend `/api/roads/tolls` endpoint\n",
|
| 60 |
"and the offline PWA map layer for toll overlay rendering.\n",
|
| 61 |
"\n",
|
| 62 |
"> π¦ Output size: ~65KB (vs 2MB+ raw CSV)"
|
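The toll-plaza processing described above is a small pandas transform. A minimal sketch; the raw column names and the renamed headers are assumptions inferred from the field list in the docs:

```
import pandas as pd

df = pd.read_csv("toll_plazas.csv")

# Keep only the 4 essential columns and drop rows with missing coordinates.
lite = (
    df[["name", "id", "lat", "lon"]]          # assumed raw column names
      .dropna(subset=["lat", "lon"])
      .rename(columns={"name": "Name", "id": "NH Number",
                       "lat": "Latitude", "lon": "Longitude"})
)

# Records-oriented JSON consumed by /api/roads/tolls and the PWA map layer.
lite.to_json("toll_plazas_lite.json", orient="records", indent=2)
```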
notebooks/YOLOv8_Pothole_Detector_Training_frontend_public_models_1.ipynb
CHANGED
|
@@ -22,10 +22,10 @@
|
|
| 22 |
"source": [
|
| 23 |
"# π YOLOv8 Pothole & Road Damage Detector β Training Pipeline\n",
|
| 24 |
"\n",
|
| 25 |
-
"**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026
|
| 26 |
"**Output:** `pothole_v1/weights/best.onnx` β deployed to `frontend/public/models/`\n",
|
| 27 |
"\n",
|
| 28 |
-
"This notebook trains a YOLOv8n object detection model to identify **potholes, cracks, and manholes** on Indian roads.
|
| 29 |
"The trained model is exported to ONNX format for in-browser inference via `onnxruntime-web`.\n",
|
| 30 |
"\n",
|
| 31 |
"---\n",
|
|
@@ -44,18 +44,6 @@
|
|
| 44 |
"> β οΈ **Requires GPU runtime:** Runtime β Change runtime type β T4 GPU"
|
| 45 |
]
|
| 46 |
},
|
| 47 |
-
{
|
| 48 |
-
"cell_type": "markdown",
|
| 49 |
-
"metadata": {},
|
| 50 |
-
"source": [
|
| 51 |
-
"## π§ Step 1 β Environment Setup\n",
|
| 52 |
-
"\n",
|
| 53 |
-
"Keeps the Colab session alive during long training runs and installs all required libraries.\n",
|
| 54 |
-
"- `ultralytics` β YOLOv8 training framework by Ultralytics\n",
|
| 55 |
-
"- `roboflow` β dataset management (optional augmentation)\n",
|
| 56 |
-
"- `onnx` + `onnxruntime` β ONNX export and validation"
|
| 57 |
-
]
|
| 58 |
-
},
|
| 59 |
{
|
| 60 |
"cell_type": "code",
|
| 61 |
"execution_count": null,
|
|
@@ -109,9 +97,12 @@
|
|
| 109 |
"cell_type": "markdown",
|
| 110 |
"metadata": {},
|
| 111 |
"source": [
|
| 112 |
-
"##
|
| 113 |
"\n",
|
| 114 |
-
"
|
| 115 |
]
|
| 116 |
},
|
| 117 |
{
|
|
@@ -151,13 +142,9 @@
|
|
| 151 |
"cell_type": "markdown",
|
| 152 |
"metadata": {},
|
| 153 |
"source": [
|
| 154 |
-
"##
|
| 155 |
"\n",
|
| 156 |
-
"
|
| 157 |
-
"```\n",
|
| 158 |
-
"chatbot_service/data/pothole_training/road_damage_2025/archive.zip\n",
|
| 159 |
-
"```\n",
|
| 160 |
-
"> π This contains ~2,009 labeled road damage images in YOLO format (potholes, cracks, manholes)."
|
| 161 |
]
|
| 162 |
},
|
| 163 |
{
|
|
@@ -194,10 +181,13 @@
|
|
| 194 |
"cell_type": "markdown",
|
| 195 |
"metadata": {},
|
| 196 |
"source": [
|
| 197 |
-
"##
|
| 198 |
"\n",
|
| 199 |
-
"
|
| 200 |
-
"
|
| 201 |
]
|
| 202 |
},
|
| 203 |
{
|
|
@@ -423,10 +413,10 @@
|
|
| 423 |
"cell_type": "markdown",
|
| 424 |
"metadata": {},
|
| 425 |
"source": [
|
| 426 |
-
"##
|
| 427 |
"\n",
|
| 428 |
-
"
|
| 429 |
-
"This
|
| 430 |
]
|
| 431 |
},
|
| 432 |
{
|
|
@@ -461,12 +451,10 @@
|
|
| 461 |
"cell_type": "markdown",
|
| 462 |
"metadata": {},
|
| 463 |
"source": [
|
| 464 |
-
"##
|
| 465 |
-
"\n",
|
| 466 |
-
"Recursively searches all dataset folders for `.jpg` images and `.txt` YOLO labels, \n",
|
| 467 |
-
"then copies them all into the master `merged/train/` directory.\n",
|
| 468 |
"\n",
|
| 469 |
-
"
|
|
|
|
| 470 |
]
|
| 471 |
},
|
| 472 |
{
|
|
@@ -507,13 +495,12 @@
|
|
| 507 |
"cell_type": "markdown",
|
| 508 |
"metadata": {},
|
| 509 |
"source": [
|
| 510 |
-
"##
|
| 511 |
"\n",
|
| 512 |
-
"
|
| 513 |
-
"
|
| 514 |
-
"- Train and validation paths\n",
|
| 515 |
"\n",
|
| 516 |
-
"
|
| 517 |
]
|
| 518 |
},
|
| 519 |
{
|
|
@@ -562,20 +549,13 @@
|
|
| 562 |
"cell_type": "markdown",
|
| 563 |
"metadata": {},
|
| 564 |
"source": [
|
| 565 |
-
"##
|
| 566 |
-
"\n",
|
| 567 |
-
"Trains YOLOv8 nano on the merged dataset using these hyperparameters:\n",
|
| 568 |
"\n",
|
| 569 |
-
"
|
| 570 |
-
"
|
| 571 |
-
"
|
| 572 |
-
"| `epochs` | 50 | Balanced between accuracy and training time |\n",
|
| 573 |
-
"| `imgsz` | 640 | Standard YOLO input resolution |\n",
|
| 574 |
-
"| `batch` | 16 | Fits T4 14GB VRAM |\n",
|
| 575 |
-
"| `device` | 0 (GPU) | CUDA training |\n",
|
| 576 |
"\n",
|
| 577 |
-
"
|
| 578 |
-
"> π Final mAP@50: ~**0.75+** after 50 epochs"
|
| 579 |
]
|
| 580 |
},
|
| 581 |
{
|
|
@@ -613,6 +593,27 @@
|
|
| 613 |
}
|
| 614 |
]
|
| 615 |
},
|
| 616 |
{
|
| 617 |
"cell_type": "code",
|
| 618 |
"source": [
|
|
|
|
| 22 |
"source": [
|
| 23 |
"# π YOLOv8 Pothole & Road Damage Detector β Training Pipeline\n",
|
| 24 |
"\n",
|
| 25 |
+
"**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026\n",
|
| 26 |
"**Output:** `pothole_v1/weights/best.onnx` β deployed to `frontend/public/models/`\n",
|
| 27 |
"\n",
|
| 28 |
+
"This notebook trains a YOLOv8n object detection model to identify **potholes, cracks, and manholes** on Indian roads.\n",
|
| 29 |
"The trained model is exported to ONNX format for in-browser inference via `onnxruntime-web`.\n",
|
| 30 |
"\n",
|
| 31 |
"---\n",
|
|
|
|
| 44 |
"> β οΈ **Requires GPU runtime:** Runtime β Change runtime type β T4 GPU"
|
| 45 |
]
|
| 46 |
},
|
| 47 |
{
|
| 48 |
"cell_type": "code",
|
| 49 |
"execution_count": null,
|
|
|
|
| 97 |
"cell_type": "markdown",
|
| 98 |
"metadata": {},
|
| 99 |
"source": [
|
| 100 |
+
"## π§ Step 1 β Environment Setup\n",
|
| 101 |
"\n",
|
| 102 |
+
"Keeps the Colab session alive during long training runs and installs all required libraries.\n",
|
| 103 |
+
"- `ultralytics` β YOLOv8 training framework by Ultralytics\n",
|
| 104 |
+
"- `roboflow` β dataset management (optional augmentation)\n",
|
| 105 |
+
"- `onnx` + `onnxruntime` β ONNX export and validation"
|
| 106 |
]
|
| 107 |
},
|
| 108 |
{
|
|
|
|
| 142 |
"cell_type": "markdown",
|
| 143 |
"metadata": {},
|
| 144 |
"source": [
|
| 145 |
+
"## β
Step 2 β Verify GPU & Import YOLO\n",
|
| 146 |
"\n",
|
| 147 |
+
"Confirms that the Tesla T4 GPU is available and the Ultralytics framework is ready."
|
| 148 |
]
|
| 149 |
},
|
| 150 |
{
|
|
|
|
| 181 |
"cell_type": "markdown",
|
| 182 |
"metadata": {},
|
| 183 |
"source": [
|
| 184 |
+
"## π Step 3 β Upload Dataset\n",
|
| 185 |
"\n",
|
| 186 |
+
"Upload the `archive.zip` file from the Hub:\n",
|
| 187 |
+
"```\n",
|
| 188 |
+
"chatbot_service/data/pothole_training/road_damage_2025/archive.zip\n",
|
| 189 |
+
"```\n",
|
| 190 |
+
"> π This contains ~2,009 labeled road damage images in YOLO format (potholes, cracks, manholes)."
|
| 191 |
]
|
| 192 |
},
|
| 193 |
{
|
|
|
|
| 413 |
"cell_type": "markdown",
|
| 414 |
"metadata": {},
|
| 415 |
"source": [
|
| 416 |
+
"## π¦ Step 4 β Extract Dataset Archive\n",
|
| 417 |
"\n",
|
| 418 |
+
"Extracts `archive.zip` into `/content/pothole_data/`.\n",
|
| 419 |
+
"This creates the raw YOLO-format dataset structure: `images/` and `labels/` subfolders."
|
| 420 |
]
|
| 421 |
},
|
| 422 |
{
|
|
|
|
| 451 |
"cell_type": "markdown",
|
| 452 |
"metadata": {},
|
| 453 |
"source": [
|
| 454 |
+
"## ποΈ Step 5 β Create Master Directory Structure\n",
|
| 455 |
"\n",
|
| 456 |
+
"Creates a unified `merged/` folder with separate `train/` and `valid/` splits.\n",
|
| 457 |
+
"This allows merging images from multiple datasets (sachin_patel, andrew_mvd) if available."
|
| 458 |
]
|
| 459 |
},
|
| 460 |
{
|
|
|
|
| 495 |
"cell_type": "markdown",
|
| 496 |
"metadata": {},
|
| 497 |
"source": [
|
| 498 |
+
"## π Step 6 β Merge Datasets (Bulletproof Search)\n",
|
| 499 |
"\n",
|
| 500 |
+
"Recursively searches all dataset folders for `.jpg` images and `.txt` YOLO labels,\n",
|
| 501 |
+
"then copies them all into the master `merged/train/` directory.\n",
|
|
|
|
| 502 |
"\n",
|
| 503 |
+
"> β
Result: **2,009 training images** merged from road_damage_2025."
|
| 504 |
]
|
| 505 |
},
|
| 506 |
{
|
|
|
|
| 549 |
"cell_type": "markdown",
|
| 550 |
"metadata": {},
|
| 551 |
"source": [
|
| 552 |
+
"## π Step 7 β Write `data.yaml`\n",
|
|
|
|
|
|
|
| 553 |
"\n",
|
| 554 |
+
"Creates the YOLO dataset configuration file defining:\n",
|
| 555 |
+
"- 3 detection classes: `['pothole', 'crack', 'manhole']`\n",
|
| 556 |
+
"- Train and validation paths\n",
|
| 557 |
"\n",
|
| 558 |
+
"The `nc: 3` setting overrides the default YOLOv8 ImageNet classes."
|
|
|
|
| 559 |
]
|
| 560 |
},
|
| 561 |
{
|
|
|
|
| 593 |
}
|
| 594 |
]
|
| 595 |
},
|
| 596 |
+
{
|
| 597 |
+
"cell_type": "markdown",
|
| 598 |
+
"metadata": {},
|
| 599 |
+
"source": [
|
| 600 |
+
"## π Step 8 β Train YOLOv8n (50 Epochs) & Export ONNX\n",
|
| 601 |
+
"\n",
|
| 602 |
+
"Trains YOLOv8 nano on the merged dataset using these hyperparameters:\n",
|
| 603 |
+
"\n",
|
| 604 |
+
"| Parameter | Value | Reason |\n",
|
| 605 |
+
"|-----------|-------|--------|\n",
|
| 606 |
+
"| `model` | yolov8n.pt | Smallest model β runs well in browser via ONNX |\n",
|
| 607 |
+
"| `epochs` | 50 | Balanced between accuracy and training time |\n",
|
| 608 |
+
"| `imgsz` | 640 | Standard YOLO input resolution |\n",
|
| 609 |
+
"| `batch` | 16 | Fits T4 14GB VRAM |\n",
|
| 610 |
+
"| `device` | 0 (GPU) | CUDA training |\n",
|
| 611 |
+
"\n",
|
| 612 |
+
"> β±οΈ Expected training time: **~45 minutes** on Tesla T4\n",
|
| 613 |
+
"> π Final mAP@50: ~**0.75+** after 50 epochs\n",
|
| 614 |
+
"\n**It will then export to ONNX and download the final model.**"
|
| 615 |
+
]
|
| 616 |
+
},
|
| 617 |
{
|
| 618 |
"cell_type": "code",
|
| 619 |
"source": [
|
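The data.yaml, training, and ONNX-export steps above map directly onto the Ultralytics API. A condensed sketch using the values from the hyperparameter table; the `val:` path and the run `name` are assumptions based on the merged/ layout and output path described in the docs:

```
from pathlib import Path
from ultralytics import YOLO

# Step 7: write the 3-class dataset config.
Path("/content/data.yaml").write_text(
    "train: /content/merged/train/images\n"
    "val: /content/merged/valid/images\n"   # assumed validation path
    "nc: 3\n"
    "names: ['pothole', 'crack', 'manhole']\n"
)

# Step 8: train YOLOv8n for 50 epochs on GPU 0, then export the best weights to ONNX.
model = YOLO("yolov8n.pt")
model.train(data="/content/data.yaml", epochs=50, imgsz=640, batch=16, device=0, name="pothole_v1")
best = YOLO("/content/runs/detect/pothole_v1/weights/best.pt")
best.export(format="onnx", imgsz=640, opset=12, simplify=True)
```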
notebooks/cell_dump.txt
ADDED
|
@@ -0,0 +1,40 @@
|
| 1 |
+
=== Accident_EDA_&_Hotspot_Generator_chatbot_service_data_accidents_3.ipynb ===
|
| 2 |
+
Code Cell 0: # Cell 0 β Upload Dataset | from google.colab import files | print("βΆ UPLOAD your accidents CSV dataset NOW:")
|
| 3 |
+
Code Cell 1: # Cell 1 β Read baseline datasets | import pandas as pd, json | df = pd.read_csv(filename)
|
| 4 |
+
Code Cell 2: # Cell 2 β Generate Summary JSON | state_col = next((c for c in df.columns if 'state' in c), None) | acc_col = next((c for c in df.columns if 'accident' in c), df.columns[2])
|
| 5 |
+
Code Cell 3: # Cell 3 β Process raw GPS tags into hotspot clusters | lat_col = next((c for c in df.columns if 'lat' in c), None) | lon_col = next((c for c in df.columns if 'lon' in c or 'lng' in c), None)
|
| 6 |
+
|
| 7 |
+
=== ChromaDB_RAG_Vectorstore_Build_chatbot_service_data_chroma_db_2.ipynb ===
|
| 8 |
+
Code Cell 0: import time | from IPython.display import Javascript | display(Javascript('''
|
| 9 |
+
Code Cell 1: # Cell 1 β Install RAG tools | !pip install chromadb==0.5.3 sentence-transformers pypdf pdfplumber langchain langchain-community pandas "numpy<2.0.0" -q | print("✅ Installations complete")
|
| 10 |
+
Code Cell 2: # Cell 2 β Initialize Embedder Models | # We use TWO embedders: English for legal text, Multilingual for standard files | from sentence_transformers import SentenceTransformer
|
| 11 |
+
Code Cell 3: # Cell 3 β Initialize Chroma Collections | import chromadb | client = chromadb.PersistentClient(path='/content/chroma_db')
|
| 12 |
+
Code Cell 4: # Cell 3.5 β Create Folders and Upload Files | import os, shutil | from google.colab import files
|
| 13 |
+
Code Cell 5: # Cell 4 β Helper functions for Document Chunking | from langchain_community.document_loaders import PyPDFLoader | from langchain_text_splitters import RecursiveCharacterTextSplitter
|
| 14 |
+
Code Cell 6: # Cell 5 β Parse & Embed strictly Legal PDFs | for pdf in glob.glob('/content/rag_data/legal/*.pdf'): | add_pdf(pdf, legal_col, en_embedder, os.path.basename(pdf))
|
| 15 |
+
Code Cell 7: # Cell 6 β Parse & Embed Multilingual CSV/Rules | if os.path.exists('/content/rag_data/violations_seed.csv'): | print("Violations file found, embedding into general_col...")
|
| 16 |
+
Code Cell 8: # Cell 7 β Download Database zip | import shutil | from google.colab import files
|
| 17 |
+
Code Cell 9: print("π Testing Vector Database Retrieval...") | # Test 1: Ask the violations (general_col) about helmets | print("\n--- Testing Violations Database ---")
|
| 18 |
+
|
| 19 |
+
=== Risk_Model_ONNX_Training_frontend_public_models_5.ipynb ===
|
| 20 |
+
Code Cell 0: # Cell 1 β Install ML Toolkit | !pip install scikit-learn skl2onnx pandas numpy -q | print("✅ Toolkit installed")
|
| 21 |
+
Code Cell 1: # Cell 2 β Build synthetic data matching sensor ingestion structure | import pandas as pd, numpy as np | # Feature: NH=0, SH=1, MDR=2, VR=3
|
| 22 |
+
Code Cell 2: # Cell 3 β Train the GradientBoosting Classifier | from sklearn.ensemble import GradientBoostingClassifier | model = GradientBoostingClassifier(n_estimators=50, max_depth=4)
|
| 23 |
+
Code Cell 3: # Cell 4 β Package as ONNX and Export | from skl2onnx import convert_sklearn | from skl2onnx.common.data_types import FloatTensorType
|
| 24 |
+
|
| 25 |
+
=== Roads_Data_Processing_backend_data_4.ipynb ===
|
| 26 |
+
Code Cell 0: # Cell 1 β Toll Plazas Lite | import pandas as pd, json | from google.colab import files
|
| 27 |
+
|
| 28 |
+
=== YOLOv8_Pothole_Detector_Training_frontend_public_models_1.ipynb ===
|
| 29 |
+
Code Cell 0: import time | from IPython.display import Javascript | display(Javascript('''
|
| 30 |
+
Code Cell 1: !pip install ultralytics roboflow -q | !pip install onnx onnxruntime -q
|
| 31 |
+
Code Cell 2: import os | from ultralytics import YOLO | print("✅ Ultralytics ready, CUDA:", os.popen("nvidia-smi --query-gpu=name --format=csv,noheader").read().strip())
|
| 32 |
+
Code Cell 3: from google.colab import files | uploaded = files.upload()
|
| 33 |
+
Code Cell 4: # Cell 3 β Extract the main zipped dataset | import zipfile | with zipfile.ZipFile('/content/archive.zip', 'r') as z:
|
| 34 |
+
Code Cell 5: # Cell 4 β Setup master directory structure for merged datasets | import os, shutil | from pathlib import Path
|
| 35 |
+
Code Cell 6: # Cell 5 β Copy all images and labels into the merged folder (Bulletproof Search) | # List of directories containing datasets (Assuming they are available in /content/) | DATASETS = [
|
| 36 |
+
Code Cell 7: # Cell 6 β Create data.yaml mapping to our 3 classes | data_yaml = """ | train: /content/merged/train/images
|
| 37 |
+
Code Cell 8: # Cell 7 β Train (45-60 min on T4 GPU) | model = YOLO('yolov8n.pt') | results = model.train(
|
| 38 |
+
Code Cell 9: # Cell 8 β Export ONNX | best_model = YOLO('/content/runs/detect/pothole_v1/weights/best.pt') | best_model.export(format='onnx', imgsz=640, opset=12, simplify=True)
|
| 39 |
+
Code Cell 10: # Cell 9 β Download the weights | import shutil | from google.colab import files
|
| 40 |
+
|
notebooks/check_cells.py
ADDED
|
@@ -0,0 +1,17 @@
|
+import json, glob
+
+with open('cell_dump.txt', 'w', encoding='utf-8') as out:
+    for nb_path in glob.glob('*.ipynb'):
+        out.write(f'=== {nb_path} ===\n')
+        with open(nb_path, 'r', encoding='utf-8') as f:
+            nb = json.load(f)
+        c_idx = 0
+        for cell in nb['cells']:
+            if cell['cell_type'] == 'code':
+                lines = cell['source']
+                # Get first 3 nonempty lines
+                non_empty = [l.strip() for l in lines if l.strip()]
+                prefix = ' | '.join(non_empty[:3])
+                out.write(f'Code Cell {c_idx}: {prefix}\n')
+                c_idx += 1
+        out.write('\n')
notebooks/smart_inject.py
ADDED
|
@@ -0,0 +1,132 @@
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
def md(lines):
|
| 4 |
+
return {"cell_type": "markdown", "metadata": {}, "source": lines}
|
| 5 |
+
|
| 6 |
+
# Definitions of Markdowns
|
| 7 |
+
nb1_mds = [
|
| 8 |
+
md(["# π YOLOv8 Pothole & Road Damage Detector β Training Pipeline\n", "\n", "**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026\n", "**Output:** `pothole_v1/weights/best.onnx` β deployed to `frontend/public/models/`\n", "\n", "This notebook trains a YOLOv8n object detection model to identify **potholes, cracks, and manholes** on Indian roads.\n", "The trained model is exported to ONNX format for in-browser inference via `onnxruntime-web`.\n", "\n", "---\n", "### π Pipeline Overview\n", "| Step | What happens |\n", "|------|-------------|\n", "| 1 | Install Ultralytics + ONNX and verify GPU |\n", "| 2 | Upload `archive.zip` dataset (road_damage_2025) |\n", "| 3 | Extract the zip into `/content/pothole_data/` |\n", "| 4 | Create master merged directory structure |\n", "| 5 | Merge all dataset images + labels into one folder |\n", "| 6 | Write `data.yaml` for 3-class detection |\n", "| 7 | Train YOLOv8n for 50 epochs on T4 GPU (~45 min) |\n", "| 8 | Export best weights to ONNX |\n", "\n", "> β οΈ **Requires GPU runtime:** Runtime β Change runtime type β T4 GPU"]),
|
| 9 |
+
md(["## π§ Step 1 β Environment Setup\n", "\n", "Keeps the Colab session alive during long training runs and installs all required libraries.\n", "- `ultralytics` β YOLOv8 training framework by Ultralytics\n", "- `roboflow` β dataset management (optional augmentation)\n", "- `onnx` + `onnxruntime` β ONNX export and validation"]),
|
| 10 |
+
md(["## β
Step 2 β Verify GPU & Import YOLO\n", "\n", "Confirms that the Tesla T4 GPU is available and the Ultralytics framework is ready."]),
|
| 11 |
+
md(["## π Step 3 β Upload Dataset\n", "\n", "Upload the `archive.zip` file from the Hub:\n", "```\n", "chatbot_service/data/pothole_training/road_damage_2025/archive.zip\n", "```\n", "> π This contains ~2,009 labeled road damage images in YOLO format (potholes, cracks, manholes)."]),
|
| 12 |
+
md(["## π¦ Step 4 β Extract Dataset Archive\n", "\n", "Extracts `archive.zip` into `/content/pothole_data/`.\n", "This creates the raw YOLO-format dataset structure: `images/` and `labels/` subfolders."]),
|
| 13 |
+
md(["## ποΈ Step 5 β Create Master Directory Structure\n", "\n", "Creates a unified `merged/` folder with separate `train/` and `valid/` splits.\n", "This allows merging images from multiple datasets (sachin_patel, andrew_mvd) if available."]),
|
| 14 |
+
md(["## π Step 6 β Merge Datasets (Bulletproof Search)\n", "\n", "Recursively searches all dataset folders for `.jpg` images and `.txt` YOLO labels,\n", "then copies them all into the master `merged/train/` directory.\n", "\n", "> β
Result: **2,009 training images** merged from road_damage_2025."]),
|
| 15 |
+
md(["## π Step 7 β Write `data.yaml`\n", "\n", "Creates the YOLO dataset configuration file defining:\n", "- 3 detection classes: `['pothole', 'crack', 'manhole']`\n", "- Train and validation paths\n", "\n", "The `nc: 3` setting overrides the default YOLOv8 ImageNet classes."]),
|
| 16 |
+
md(["## π Step 8 β Train YOLOv8n (50 Epochs) & Export ONNX\n", "\n", "Trains YOLOv8 nano on the merged dataset using these hyperparameters:\n", "\n", "| Parameter | Value | Reason |\n", "|-----------|-------|--------|\n", "| `model` | yolov8n.pt | Smallest model β runs well in browser via ONNX |\n", "| `epochs` | 50 | Balanced between accuracy and training time |\n", "| `imgsz` | 640 | Standard YOLO input resolution |\n", "| `batch` | 16 | Fits T4 14GB VRAM |\n", "| `device` | 0 (GPU) | CUDA training |\n", "\n", "> β±οΈ Expected training time: **~45 minutes** on Tesla T4\n", "> π Final mAP@50: ~**0.75+** after 50 epochs\n","\n**It will then export to ONNX and download the final model.**"])
|
| 17 |
+
]
|
| 18 |
+
|
| 19 |
+
nb2_mds = [
|
| 20 |
+
md(["# π§ ChromaDB RAG Vectorstore β Legal & Medical PDF Ingestion\n", "\n", "**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026\n", "**Output:** `chroma_db/` directory β deployed to `chatbot_service/data/chroma_db/`\n", "\n", "This notebook builds the **Retrieval-Augmented Generation (RAG)** knowledge base for the SafeVisionAI chatbot.\n", "It ingests Indian legal documents (Motor Vehicles Act, MoRTH circulars) and first-aid medical PDFs,\n", "chunks them, embeds them using `sentence-transformers`, and stores them in a **ChromaDB** vector store.\n", "\n", "---\n", "### ποΈ Source Documents\n", "| Category | Files | Source |\n", "|----------|-------|--------|\n", "| Legal | Motor Vehicles Act 2019, MoRTH 2022 | `download_legal_pdfs.py` |\n", "| Medical | First Aid guides, Emergency protocols | `download_legal_pdfs.py` |\n", "\n", "### π Pipeline\n", "```\n", "PDFs β pdfplumber chunks β sentence-transformer embeddings β ChromaDB index\n", "```\n", "\n", "> π‘ The resulting `chroma_db/` is what the chatbot queries at runtime for grounded answers."]),
|
| 21 |
+
md(["## π§ Step 1 β Install Dependencies\n", "\n", "Installs the full RAG stack:\n", "- `chromadb` β local vector database for semantic search\n", "- `sentence-transformers` β `all-MiniLM-L6-v2` model for text embeddings\n", "- `pdfplumber` β PDF text extraction with page layout awareness\n", "- `langchain` β document chunking utilities"]),
|
| 22 |
+
md(["## π Step 2 β Upload PDF Documents\n", "\n", "Upload all legal and medical PDFs from:\n", "```\n", "chatbot_service/data/legal/\n", "chatbot_service/data/medical/\n", "```\n", "\n", "> π Expected PDFs: Motor_Vehicles_Act.pdf, MoRTH_2022_Report.pdf, first_aid_guide.pdf, etc."]),
|
| 23 |
+
md(["## βοΈ Step 3 β Extract & Chunk PDF Text\n", "\n", "Uses `pdfplumber` to extract text from each PDF page,\n", "then splits into fixed-size chunks (512 tokens) with 50-token overlap.\n", "\n", "Chunking ensures the embedding model sees coherent, context-rich passages\n", "rather than arbitrarily cut sentences."]),
|
| 24 |
+
md(["## π’ Step 4 β Generate Embeddings\n", "\n", "Uses the `all-MiniLM-L6-v2` sentence-transformer model to convert each text chunk\n", "into a 384-dimensional embedding vector.\n", "\n", "| Model | Dimensions | Speed | Quality |\n", "|-------|-----------|-------|---------|\n", "| all-MiniLM-L6-v2 | 384 | Fast | Good for semantic QA |"]),
|
| 25 |
+
md(["## πΎ Step 5 β Build & Persist ChromaDB Index\n", "\n", "Creates a persistent ChromaDB collection and upserts all embedded chunks.\n", "The resulting `chroma_db/` folder contains the SQLite + vector index files.\n", "\n", "> π¦ Output size: ~50-100MB depending on number of PDFs ingested."]),
|
| 26 |
+
md(["## π₯ Step 6 β Download ChromaDB\n", "\n", "Zips the `chroma_db/` directory and downloads it for deployment.\n", "Place the extracted folder at: `chatbot_service/data/chroma_db/`\n", "\n", "The chatbot service auto-loads this at startup β no rebuild needed."]),
|
| 27 |
+
]
|
| 28 |
+
|
| 29 |
+
nb3_mds = [
|
| 30 |
+
md(["# πΊοΈ Accident EDA & Blackspot Hotspot Generator\n", "\n", "**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026\n", "**Output:** `accidents_summary.json` + `blackspot_seed.csv` β seeded to the backend database\n", "\n", "This notebook processes the **Kaggle India Road Accidents dataset** (1M+ rows)\n", "to produce two key intelligence artifacts:\n", "\n", "1. **`accidents_summary.json`** β National total + top 10 states by accident count\n", "2. **`blackspot_seed.csv`** β GPS clusters with accident counts for map hotspot visualization\n", "\n", "---\n", "### π Dataset\n", "- **Source:** Kaggle India Road Accidents dataset\n", "- **Size:** ~1,048,575 rows Β· 30+ columns\n", "- **Acquired via:** `setup_kaggle.ps1` + `scripts/data/seed_blackspots.py`\n", "\n", "### π Pipeline\n", "```\n", "Raw CSV β Normalize columns β State summary β GPS cluster β blackspot_seed.csv\n", "```"]),
|
| 31 |
+
md(["## π Step 0 β Upload Accidents Dataset\n", "\n", "Upload `kaggle_india_accidents.csv` from:\n", "```\n", "chatbot_service/data/accidents/kaggle_india_accidents.csv\n", "```\n", "\n", "> β οΈ This file is ~450MB. The Hub stores it via Git LFS."]),
|
| 32 |
+
md(["## π Step 1 β Load & Normalize Dataset\n", "\n", "Reads the CSV and normalizes all column names to lowercase snake_case.\n", "Result: **1,048,575 rows** of accident records across Indian states.\n", "\n", "> π‘ The mixed-type DtypeWarning is expected for columns with mixed numeric/string data."]),
|
| 33 |
+
md(["## π Step 2 β Generate National Summary JSON\n", "\n", "Auto-detects the `state` and `accident` columns using flexible column name matching,\n", "then computes:\n", "- **National total** β sum of all accident counts\n", "- **Top 10 states** β ranked by accident volume\n", "\n", "Exports `accidents_summary.json` β used by the chatbot to answer national stats queries."]),
|
| 34 |
+
md(["## π Step 3 β Generate GPS Blackspot Clusters\n", "\n", "Groups accident records by rounded GPS coordinates (2 decimal places β ~1kmΒ²),\n", "then counts accidents per grid cell.\n", "\n", "Result: **4,134 blackspot clusters** exported as `blackspot_seed.csv`\n", "β This CSV is loaded by `backend/scripts/app/seed_emergency.py` to populate the PostGIS accident layer.\n", "\n", "| Column | Description |\n", "|--------|-------------|\n", "| `lat_r` | Rounded latitude (Β±0.01Β°) |\n", "| `lon_r` | Rounded longitude (Β±0.01Β°) |\n", "| `accident_count` | Number of accidents in this 1kmΒ² cell |"])
|
| 35 |
+
]
|
| 36 |
+
|
| 37 |
+
nb4_mds = [
|
| 38 |
+
md(["# π£οΈ Roads & Toll Plaza Data Processing\n", "\n", "**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026\n", "**Output:** `toll_plazas_lite.json` β deployed to `backend/data/roads/`\n", "\n", "This notebook processes the **NHAI Toll Plaza dataset** to produce a lightweight JSON\n", "suitable for the SafeVisionAI backend API and offline PWA map layer.\n", "\n", "---\n", "### π Dataset\n", "- **Source:** NHAI Open Data / custom toll_plazas.csv\n", "- **Fields:** Name, NH Number, Latitude, Longitude\n", "- **Coverage:** All operational toll plazas on National Highways\n", "\n", "### π Pipeline\n", "```\n", "toll_plazas.csv β Select key columns β Rename headers β Export toll_plazas_lite.json\n", "```"]),
|
| 39 |
+
md(["## π¦ Step 1 β Upload & Process Toll Plaza CSV\n", "\n", "Upload `toll_plazas.csv` from:\n", "```\n", "backend/data/roads/toll_plazas.csv\n", "```\n", "\n", "The processing pipeline:\n", "1. Reads the CSV with `pandas`\n", "2. Selects only 4 essential columns: `name, id, lat, lon`\n", "3. Drops rows with missing coordinates\n", "4. Renames to human-readable headers\n", "5. Exports as `toll_plazas_lite.json`\n", "\n", "The resulting JSON is consumed by the backend `/api/roads/tolls` endpoint\n", "and the offline PWA map layer for toll overlay rendering.\n", "\n", "> π¦ Output size: ~65KB (vs 2MB+ raw CSV)"])
|
| 40 |
+
]
|
| 41 |
+
|
| 42 |
+
nb5_mds = [
|
| 43 |
+
md(["# β‘ Road Risk Scoring Model β ONNX Training Pipeline\n", "\n", "**Part of:** SafeVisionAI Β· IIT Madras Road Safety Hackathon 2026\n", "**Output:** `risk_model.onnx` (~21KB) β deployed to `frontend/public/models/`\n", "\n", "This notebook trains a **GradientBoosting classifier** to predict real-time road risk\n", "and exports it as ONNX for **in-browser inference** β no server call needed.\n", "\n", "---\n", "### π§ Model Architecture\n", "| Component | Details |\n", "|-----------|--------|\n", "| Algorithm | GradientBoostingClassifier |\n", "| Input features | 5 (road type, hour, rain, speed limit, prev accidents) |\n", "| Output | Binary: `high_risk` (0 or 1) |\n", "| Export | ONNX via `skl2onnx` |\n", "| Size | ~21KB β loads in milliseconds in browser |\n", "\n", "### π Pipeline\n", "```\n", "Synthetic data generation β GBM training β ONNX conversion β Download\n", "```\n", "\n", "> π‘ The model runs entirely client-side in the SafeVisionAI PWA using `onnxruntime-web`."]),
|
| 44 |
+
md(["## π§ Step 1 β Install ML Toolkit\n", "\n", "Installs the minimum stack needed for training and ONNX export:\n", "- `scikit-learn` β GradientBoostingClassifier\n", "- `skl2onnx` β converts sklearn models to ONNX format\n", "- `pandas` + `numpy` β data generation and manipulation"]),
|
| 45 |
+
md(["## ποΈ Step 2 β Build Synthetic Training Data\n", "\n", "Generates 5,000 synthetic road sensor records matching the live app's data structure:\n", "\n", "| Feature | Values | Description |\n", "|---------|--------|-------------|\n", "| `road_type` | 0-3 | NH=0, SH=1, MDR=2, VR=3 |\n", "| `hour` | 0-23 | Hour of day |\n", "| `is_rain` | 0/1 | Weather condition |\n", "| `speed_limit` | 40/60/80/100 | Posted speed (km/h) |\n", "| `prev_accidents` | Poisson(2) | Historical accident count |\n", "\n", "**Label logic:** `high_risk = 1` when: Night hours (10pmβ4am) + National/State Highway + Raining\n", "This reflects real-world patterns from the India accident dataset."]),
|
| 46 |
+
md(["## π― Step 3 β Train GradientBoosting Classifier\n", "\n", "Trains a GBM with 50 estimators and max depth 4:\n", "- **Fast:** <10 seconds on CPU\n", "- **Accurate:** Handles non-linear risk patterns well\n", "- **Tiny:** Converts to 21KB ONNX β ideal for edge/PWA deployment"]),
|
| 47 |
+
md(["## π¦ Step 4 β Export to ONNX & Download\n", "\n", "Converts the trained sklearn model to ONNX format using `skl2onnx`:\n", "- **Input:** `FloatTensorType([None, 5])` β batch of 5-feature vectors\n", "- **Output:** Risk probability + binary class label\n", "\n", "Download `risk_model.onnx` and place at:\n", "```\n", "frontend/public/models/risk_model.onnx\n", "```\n", "\n", "The Next.js PWA loads this at startup and runs inference on each map segment click.\n", "\n", "> β
Final output: **~21KB** ONNX model β ready for browser deployment"])
|
| 48 |
+
]
|
| 49 |
+
|
| 50 |
+
# Mapping structure:
|
| 51 |
+
# nb_path: { code_cell_index: list of markdowns to insert BEFORE it }
|
| 52 |
+
config = {
|
| 53 |
+
'YOLOv8_Pothole_Detector_Training_frontend_public_models_1.ipynb': {
|
| 54 |
+
0: [nb1_mds[0]], # Title
|
| 55 |
+
1: [nb1_mds[1]], # Step 1 (pip install)
|
| 56 |
+
2: [nb1_mds[2]], # Step 2 (import os)
|
| 57 |
+
3: [nb1_mds[3]], # Step 3 (upload)
|
| 58 |
+
4: [nb1_mds[4]], # Step 4 (extract)
|
| 59 |
+
5: [nb1_mds[5]], # Step 5 (master directory)
|
| 60 |
+
6: [nb1_mds[6]], # Step 6 (merge datasets)
|
| 61 |
+
7: [nb1_mds[7]], # Step 7 (data.yaml)
|
| 62 |
+
8: [nb1_mds[8]] # Step 8 (train & export)
|
| 63 |
+
},
|
| 64 |
+
'ChromaDB_RAG_Vectorstore_Build_chatbot_service_data_chroma_db_2.ipynb': {
|
| 65 |
+
0: [nb2_mds[0]], # Title
|
| 66 |
+
1: [nb2_mds[1]], # Step 1: Install tools
|
| 67 |
+
4: [nb2_mds[2]], # Step 2: Upload Files (cell 3.5 in code)
|
| 68 |
+
5: [nb2_mds[3]], # Step 3: Helper functions chunking (cell 4 in code)
|
| 69 |
+
6: [nb2_mds[4]], # Step 4: Parse & Embed legal (cell 5 in code)
|
| 70 |
+
7: [nb2_mds[5]], # Step 5: Build & persist index (cell 6 in code)
|
| 71 |
+
8: [nb2_mds[6]] # Step 6: Download Database
|
| 72 |
+
},
|
| 73 |
+
'Accident_EDA_&_Hotspot_Generator_chatbot_service_data_accidents_3.ipynb': {
|
| 74 |
+
0: [nb3_mds[0], nb3_mds[1]], # Title + Step 0: Upload
|
| 75 |
+
1: [nb3_mds[2]], # Step 1: Load
|
| 76 |
+
2: [nb3_mds[3]], # Step 2: Summary
|
| 77 |
+
3: [nb3_mds[4]] # Step 3: GPS
|
| 78 |
+
},
|
| 79 |
+
'Roads_Data_Processing_backend_data_4.ipynb': {
|
| 80 |
+
0: [nb4_mds[0], nb4_mds[1]] # Title + Step 1
|
| 81 |
+
},
|
| 82 |
+
'Risk_Model_ONNX_Training_frontend_public_models_5.ipynb': {
|
| 83 |
+
0: [nb5_mds[0], nb5_mds[1]], # Title + Install ML
|
| 84 |
+
1: [nb5_mds[2]], # Step 2 Build synthetic Data
|
| 85 |
+
2: [nb5_mds[3]], # Step 3 Train GradientBoosting
|
| 86 |
+
3: [nb5_mds[4]] # Step 4 Export ONNX
|
| 87 |
+
}
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
import os
|
| 91 |
+
def reset_and_inject():
|
| 92 |
+
# Fix both repositories
|
| 93 |
+
repos = [
|
| 94 |
+
r"C:\Hackathons\IITM\SafeVisionAI-Dataset-Hub\notebooks",
|
| 95 |
+
r"C:\Hackathons\IITM\SafeVisionAI\notebooks"
|
| 96 |
+
]
|
| 97 |
+
|
| 98 |
+
for repo in repos:
|
| 99 |
+
print(f"\nProcessing {repo}")
|
| 100 |
+
for nb_name, mapping in config.items():
|
| 101 |
+
path = os.path.join(repo, nb_name)
|
| 102 |
+
if not os.path.exists(path):
|
| 103 |
+
print(f"[SKIP] {nb_name}")
|
| 104 |
+
continue
|
| 105 |
+
|
| 106 |
+
with open(path, 'r', encoding='utf-8') as f:
|
| 107 |
+
nb = json.load(f)
|
| 108 |
+
|
| 109 |
+
# Start fresh with only code cells (wipe existing markdowns)
|
| 110 |
+
code_cells = [c for c in nb['cells'] if c['cell_type'] == 'code']
|
| 111 |
+
|
| 112 |
+
new_cells = []
|
| 113 |
+
for idx, c_cell in enumerate(code_cells):
|
| 114 |
+
# Before adding the code cell, add any markdowns mapped to this index
|
| 115 |
+
if idx in mapping:
|
| 116 |
+
for md_obj in mapping[idx]:
|
| 117 |
+
new_cells.append(md_obj)
|
| 118 |
+
|
| 119 |
+
new_cells.append(c_cell)
|
| 120 |
+
|
| 121 |
+
# Add any remaining markdowns mapped to an index purely at the end (greater than code count)
|
| 122 |
+
# Not needed for our current dict but safe.
|
| 123 |
+
|
| 124 |
+
nb['cells'] = new_cells
|
| 125 |
+
with open(path, 'w', encoding='utf-8') as f:
|
| 126 |
+
json.dump(nb, f, indent=2, ensure_ascii=False)
|
| 127 |
+
|
| 128 |
+
print(f"[OK] {nb_name} -> now has {len(new_cells)} total cells")
|
| 129 |
+
|
| 130 |
+
if __name__ == "__main__":
|
| 131 |
+
reset_and_inject()
|
| 132 |
+
print("\n[DONE] All cells perfectly mapped, zero duplicates.")
|
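After running smart_inject.py, a quick spot-check confirms every mapped code cell now has its markdown documentation in front of it; this hypothetical snippet is independent of check_cells.py and uses one notebook path as an example:

```
import json

# Example notebook; any of the five notebooks in the config works here.
path = "notebooks/Risk_Model_ONNX_Training_frontend_public_models_5.ipynb"
with open(path, encoding="utf-8") as f:
    cells = json.load(f)["cells"]

# Print the cell-type sequence so missing or duplicated markdown is easy to spot.
print(" -> ".join(c["cell_type"] for c in cells))
```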