Formats: parquet
Languages: English
Size: 10K - 100K
Tags: document-detection, corner-detection, perspective-correction, document-scanner, keypoint-regression
License: other
Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
- README.md +391 -0
- collages/test_collage.jpg +3 -0
- collages/train_collage.jpg +3 -0
- collages/val_collage.jpg +3 -0
- dataset_info.json +67 -0
- test/data-00000.parquet +3 -0
- test/data-00001.parquet +3 -0
- test/data-00002.parquet +3 -0
- test/data-00003.parquet +3 -0
- test/data-00004.parquet +3 -0
- test/data-00005.parquet +3 -0
- test/data-00006.parquet +3 -0
- train/data-00000.parquet +3 -0
- train/data-00001.parquet +3 -0
- train/data-00002.parquet +3 -0
- train/data-00003.parquet +3 -0
- train/data-00004.parquet +3 -0
- train/data-00005.parquet +3 -0
- train/data-00006.parquet +3 -0
- train/data-00007.parquet +3 -0
- train/data-00008.parquet +3 -0
- train/data-00009.parquet +3 -0
- train/data-00010.parquet +3 -0
- train/data-00011.parquet +3 -0
- train/data-00012.parquet +3 -0
- train/data-00013.parquet +3 -0
- train/data-00014.parquet +3 -0
- train/data-00015.parquet +3 -0
- train/data-00016.parquet +3 -0
- train/data-00017.parquet +3 -0
- train/data-00018.parquet +3 -0
- train/data-00019.parquet +3 -0
- train/data-00020.parquet +3 -0
- train/data-00021.parquet +3 -0
- train/data-00022.parquet +3 -0
- train/data-00023.parquet +3 -0
- train/data-00024.parquet +3 -0
- train/data-00025.parquet +3 -0
- train/data-00026.parquet +3 -0
- train/data-00027.parquet +3 -0
- train/data-00028.parquet +3 -0
- train/data-00029.parquet +3 -0
- train/data-00030.parquet +3 -0
- train/data-00031.parquet +3 -0
- train/data-00032.parquet +3 -0
- val/data-00000.parquet +3 -0
- val/data-00001.parquet +3 -0
- val/data-00002.parquet +3 -0
- val/data-00003.parquet +3 -0
- val/data-00004.parquet +3 -0
README.md
ADDED
---
dataset_info:
  features:
    - name: image
      dtype: image
    - name: filename
      dtype: string
    - name: is_negative
      dtype: bool
    - name: corner_tl_x
      dtype: float32
    - name: corner_tl_y
      dtype: float32
    - name: corner_tr_x
      dtype: float32
    - name: corner_tr_y
      dtype: float32
    - name: corner_br_x
      dtype: float32
    - name: corner_br_y
      dtype: float32
    - name: corner_bl_x
      dtype: float32
    - name: corner_bl_y
      dtype: float32
  splits:
    - name: train
      num_examples: 32968
    - name: validation
      num_examples: 8645
    - name: test
      num_examples: 6652
configs:
  - config_name: default
    data_files:
      - split: train
        path: train/*.parquet
      - split: validation
        path: val/*.parquet
      - split: test
        path: test/*.parquet
license: other
task_categories:
  - image-segmentation
  - keypoint-detection
  - object-detection
tags:
  - document-detection
  - corner-detection
  - perspective-correction
  - document-scanner
  - keypoint-regression
language:
  - en
size_categories:
  - 10K<n<100K
---

# DocCornerDataset

A high-quality document corner detection dataset for training models to detect the four corners of documents in images. This dataset is optimized for building robust document scanning and perspective correction applications.

## Dataset Examples

### Training Set
<img src="collages/train_collage.jpg" alt="Training samples" width="600"/>

### Validation Set
<img src="collages/val_collage.jpg" alt="Validation samples" width="600"/>

### Test Set
<img src="collages/test_collage.jpg" alt="Test samples" width="600"/>

*Green polygons show the annotated document corners*

## Dataset Description

This dataset contains images with document corner annotations, optimized for training robust document detection models. It uses the best-performing splits from an iterative dataset cleaning process with multiple quality validation steps.

### Key Features

- **High Quality Annotations**: Labels refined through iterative cleaning with multiple teacher models
- **Diverse Document Types**: IDs, invoices, receipts, books, cards, and general documents
- **Negative Samples**: Includes images without documents for training robust classifiers
- **No Overlap**: Train, validation, and test splits are completely disjoint

## Dataset Statistics

| Split | Images | Description |
|-------|--------|-------------|
| `train` | 32,968 | Training set (cleaned iter3 + hard negatives) |
| `validation` | 8,645 | Validation set (cleaned iter3) |
| `test` | 6,652 | Held-out test set (no overlap with train/val) |
| **Total** | **48,265** | |

## Data Sources and Licenses

This dataset is compiled from multiple open-source datasets. **Please refer to the original dataset licenses before using this data.**

### MIDV Dataset (ID Cards)

Mobile Identity Document Video dataset for identity document detection and recognition.

| Dataset | Images | License | Source |
|---------|--------|---------|--------|
| **MIDV-500** | ~9,400 | Research use | [Website](http://l3i-share.univ-lr.fr/MIDV500/) |
| **MIDV-2019** | ~1,350 | Research use | [Website](http://l3i-share.univ-lr.fr/MIDV2019/) |

**Citation:**
```bibtex
@article{arlazarov2019midv500,
  title={MIDV-500: A Dataset for Identity Documents Analysis and Recognition on Mobile Devices in Video Stream},
  author={Arlazarov, V.V. and Bulatov, K. and Chernov, T. and Arlazarov, V.L.},
  journal={Computer Optics},
  volume={43},
  number={5},
  pages={818--824},
  year={2019}
}

@inproceedings{arlazarov2019midv2019,
  title={MIDV-2019: Challenges of the modern mobile-based document OCR},
  author={Arlazarov, V.V. and Bulatov, K. and Chernov, T. and Arlazarov, V.L.},
  booktitle={ICDAR},
  year={2019}
}
```

### SmartDoc Dataset (Documents)

SmartDoc Challenge dataset for document image acquisition and quality assessment.

| Dataset | Images | License | Source |
|---------|--------|---------|--------|
| **SmartDoc** | ~1,380 | Research use | [Website](https://smartdoc.univ-lr.fr/) |

**Citation:**
```bibtex
@inproceedings{burie2015smartdoc,
  title={ICDAR 2015 Competition on Smartphone Document Capture and OCR (SmartDoc)},
  author={Burie, J.C. and Chazalon, J. and Coustaty, M. and others},
  booktitle={ICDAR},
  year={2015}
}
```

### COCO Dataset (Negative Samples)

Common Objects in Context dataset used for negative samples (images without documents).

| Dataset | Images | License | Source |
|---------|--------|---------|--------|
| **COCO val2017** | ~4,300 | [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) | [Website](https://cocodataset.org/) |
| **COCO train2017** | ~11,400 | [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) | [Website](https://cocodataset.org/) |

**Note:** Excluded categories that could be confused with documents: book, laptop, tv, cell phone, keyboard, mouse, remote, clock.

**Citation:**
```bibtex
@inproceedings{lin2014coco,
  title={Microsoft COCO: Common Objects in Context},
  author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and others},
  booktitle={ECCV},
  year={2014}
}
```

### Roboflow Universe (Various Documents)

Various document datasets from the Roboflow Universe community.

| Category | Datasets | License | Source |
|----------|----------|---------|--------|
| **Documents** | document_segmentation_v2, doc_scanner, doc_rida, documento | Various (check individual) | [Roboflow Universe](https://universe.roboflow.com/) |
| **Bills/Invoices** | bill_segmentation, cs_invoice | Various (check individual) | [Roboflow Universe](https://universe.roboflow.com/) |
| **Receipts** | receipt_detection, receipt_occam, receipts_coolstuff | Various (check individual) | [Roboflow Universe](https://universe.roboflow.com/) |
| **ID Cards** | card_corner, card_4_class, id_card_skew, id_detections, idcard_jj | Various (check individual) | [Roboflow Universe](https://universe.roboflow.com/) |
| **Passports** | segment_passport | Various (check individual) | [Roboflow Universe](https://universe.roboflow.com/) |
| **Books** | book_reader, page_segmentation_tecgp, book_cmjt2 | Various (check individual) | [Roboflow Universe](https://universe.roboflow.com/) |

**Note:** Roboflow datasets have various licenses. Please check the individual dataset pages on [Roboflow Universe](https://universe.roboflow.com/) for specific license terms.

## Features

| Feature | Type | Description |
|---------|------|-------------|
| `image` | Image | The document image (JPEG) |
| `filename` | string | Original filename for traceability |
| `is_negative` | bool | `True` if image contains no document |
| `corner_tl_x` | float32 | Top-left corner X coordinate (normalized 0-1) |
| `corner_tl_y` | float32 | Top-left corner Y coordinate (normalized 0-1) |
| `corner_tr_x` | float32 | Top-right corner X coordinate (normalized 0-1) |
| `corner_tr_y` | float32 | Top-right corner Y coordinate (normalized 0-1) |
| `corner_br_x` | float32 | Bottom-right corner X coordinate (normalized 0-1) |
| `corner_br_y` | float32 | Bottom-right corner Y coordinate (normalized 0-1) |
| `corner_bl_x` | float32 | Bottom-left corner X coordinate (normalized 0-1) |
| `corner_bl_y` | float32 | Bottom-left corner Y coordinate (normalized 0-1) |

### Corner Order

Corners are ordered **clockwise** starting from top-left:

```
1 (TL) -------- 2 (TR)
  |                |
  |    Document    |
  |                |
4 (BL) -------- 3 (BR)
```
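When corner candidates come from another source (for example, a generic contour detector), they can be re-ordered into this TL, TR, BR, BL convention with the usual sum/difference heuristic. A minimal sketch; the `order_corners` helper is illustrative and not part of the dataset tooling:

```python
import numpy as np

def order_corners(pts):
    """Order four (x, y) points as TL, TR, BR, BL using the sum/diff heuristic."""
    pts = np.asarray(pts, dtype=np.float32)
    s = pts.sum(axis=1)                 # x + y: smallest at TL, largest at BR
    d = np.diff(pts, axis=1).ravel()    # y - x: smallest at TR, largest at BL
    tl, br = pts[np.argmin(s)], pts[np.argmax(s)]
    tr, bl = pts[np.argmin(d)], pts[np.argmax(d)]
    return np.stack([tl, tr, br, bl])
```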
### Coordinate System

- Coordinates are **normalized** to the range [0, 1]
- To convert to pixel coordinates: `pixel_x = corner_x * image_width`
- Origin (0, 0) is at the **top-left** of the image

### Negative Samples

Images with `is_negative=True`:
- Do not contain any document
- All corner coordinates are `null`
- Useful for training classifiers to reject non-document images
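Negative samples can be dropped at load time when only geometric supervision is needed. A minimal sketch using the `datasets` filter API; the variable names are illustrative:

```python
from datasets import load_dataset

dataset = load_dataset("mapo80/DocCornerDataset")

# Keep only images that actually contain a document
positives = dataset["train"].filter(lambda s: not s["is_negative"])
print(f"{len(positives)} of {len(dataset['train'])} training samples contain a document")
```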
## Usage

### Loading the Dataset

```python
from datasets import load_dataset

# Load all splits
dataset = load_dataset("mapo80/DocCornerDataset")

# Access specific splits
train_data = dataset["train"]
val_data = dataset["validation"]
test_data = dataset["test"]

print(f"Train: {len(train_data)} samples")
print(f"Val: {len(val_data)} samples")
print(f"Test: {len(test_data)} samples")
```

### Iterating Over Samples

```python
for sample in dataset["train"]:
    image = sample["image"]  # PIL Image
    filename = sample["filename"]

    if not sample["is_negative"]:
        # Get corner coordinates (normalized 0-1)
        corners = [
            (sample["corner_tl_x"], sample["corner_tl_y"]),
            (sample["corner_tr_x"], sample["corner_tr_y"]),
            (sample["corner_br_x"], sample["corner_br_y"]),
            (sample["corner_bl_x"], sample["corner_bl_y"]),
        ]

        # Convert to pixel coordinates
        w, h = image.size
        corners_px = [(int(x * w), int(y * h)) for x, y in corners]
```

### Visualizing Annotations

```python
from PIL import Image, ImageDraw

def draw_corners(image, corners, color=(0, 255, 0), width=3):
    """Draw document corners on image."""
    draw = ImageDraw.Draw(image)
    w, h = image.size

    # Convert normalized to pixel coords
    points = [(int(c[0] * w), int(c[1] * h)) for c in corners]

    # Draw polygon
    for i in range(4):
        draw.line([points[i], points[(i+1) % 4]], fill=color, width=width)

    # Draw corner circles
    for p in points:
        r = 5
        draw.ellipse([p[0]-r, p[1]-r, p[0]+r, p[1]+r], fill=color)

    return image

# Example usage
sample = dataset["train"][0]
if not sample["is_negative"]:
    corners = [
        (sample["corner_tl_x"], sample["corner_tl_y"]),
        (sample["corner_tr_x"], sample["corner_tr_y"]),
        (sample["corner_br_x"], sample["corner_br_y"]),
        (sample["corner_bl_x"], sample["corner_bl_y"]),
    ]
    annotated = draw_corners(sample["image"].copy(), corners)
    annotated.show()
```
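Since the annotations are intended for perspective correction, the four corners can be used directly to rectify the page. A minimal sketch assuming OpenCV (`cv2`) is installed; the `rectify` helper and the 1000x1414 output size are illustrative choices, not part of the dataset:

```python
import numpy as np
import cv2

def rectify(image, corners_px, out_w=1000, out_h=1414):
    """Warp the document quad (TL, TR, BR, BL in pixel coords) to a flat rectangle."""
    src = np.array(corners_px, dtype=np.float32)
    dst = np.array(
        [[0, 0], [out_w - 1, 0], [out_w - 1, out_h - 1], [0, out_h - 1]],
        dtype=np.float32,
    )
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(np.array(image), M, (out_w, out_h))

# Example usage with `image` and `corners_px` from the iteration example above
# scanned = rectify(image, corners_px)
```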
### Training a Model (PyTorch Example)

```python
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from datasets import load_dataset

dataset = load_dataset("mapo80/DocCornerDataset")

# Resize and convert the PIL images to tensors
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])

def collate_fn(batch):
    # convert("RGB") guards against grayscale images so stacking always yields 3 channels
    images = torch.stack([transform(s["image"].convert("RGB")) for s in batch])

    # Stack corner coordinates (8 values per sample)
    corners = []
    for s in batch:
        if s["is_negative"]:
            corners.append(torch.zeros(8))
        else:
            corners.append(torch.tensor([
                s["corner_tl_x"], s["corner_tl_y"],
                s["corner_tr_x"], s["corner_tr_y"],
                s["corner_br_x"], s["corner_br_y"],
                s["corner_bl_x"], s["corner_bl_y"],
            ]))

    return images, torch.stack(corners)

train_loader = DataLoader(
    dataset["train"],
    batch_size=32,
    shuffle=True,
    collate_fn=collate_fn
)
```
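The loader yields image batches paired with an 8-value corner target per sample. Below is a minimal training-step sketch on top of it, using a torchvision MobileNetV2 backbone with an 8-output regression head; the head, loss, and optimizer are illustrative choices, not the exact recipe behind the reported results:

```python
import torch
import torch.nn as nn
from torchvision import models

# MobileNetV2 backbone with an 8-value regression head (x, y for 4 corners)
model = models.mobilenet_v2(weights=None)
model.classifier[1] = nn.Linear(model.last_channel, 8)

optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
criterion = nn.SmoothL1Loss()

model.train()
for images, corners in train_loader:
    preds = model(images).sigmoid()  # keep predictions in [0, 1] like the labels
    loss = criterion(preds, corners)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```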
## Model Performance

Models trained on this dataset achieve the following performance:

| Model | Input Size | mIoU (val) | mIoU (test) |
|-------|------------|------------|-------------|
| MobileNetV2 (alpha=0.35) | 224x224 | 0.9894 | 0.9826 |
| MobileNetV2 (alpha=0.35) | 256x256 | 0.9902 | 0.9819 |

*mIoU = Mean Intersection over Union between predicted and ground truth quadrilaterals*
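Quadrilateral IoU can be computed as a polygon intersection over union. A minimal sketch using `shapely` (an assumed dependency; any polygon-clipping library works), with both quads given as four (x, y) points in TL, TR, BR, BL order:

```python
from shapely.geometry import Polygon

def quad_iou(pred_corners, gt_corners):
    """IoU between two quadrilaterals, each given as four (x, y) points."""
    pred, gt = Polygon(pred_corners), Polygon(gt_corners)
    if not (pred.is_valid and gt.is_valid):
        return 0.0
    union = pred.union(gt).area
    return pred.intersection(gt).area / union if union > 0 else 0.0
```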
## Citation

If you use this dataset in your research, please cite this dataset and the original source datasets:

```bibtex
@dataset{doccornerdataset2025,
  author    = {mapo80},
  title     = {DocCornerDataset: Document Corner Detection Dataset},
  year      = {2025},
  publisher = {Hugging Face},
  url       = {https://huggingface.co/datasets/mapo80/DocCornerDataset}
}
```

**Please also cite the original datasets used:**
- MIDV-500/MIDV-2019 (Arlazarov et al., 2019)
- SmartDoc (Burie et al., 2015)
- COCO (Lin et al., 2014)

## License

⚠️ **This dataset is compiled from multiple sources with different licenses.**

| Source | License |
|--------|---------|
| MIDV-500/MIDV-2019 | Research use only |
| SmartDoc | Research use only |
| COCO | CC BY 4.0 |
| Roboflow datasets | Various (check individual datasets) |

**Before using this dataset, please review the licenses of the original datasets:**
- [MIDV-500](http://l3i-share.univ-lr.fr/MIDV500/)
- [MIDV-2019](http://l3i-share.univ-lr.fr/MIDV2019/)
- [SmartDoc](https://smartdoc.univ-lr.fr/)
- [COCO](https://cocodataset.org/#termsofuse)
- [Roboflow Universe](https://universe.roboflow.com/) (check individual datasets)

## Acknowledgments

This dataset was created by combining and processing multiple open-source datasets. We thank the authors of MIDV, SmartDoc, COCO, and the Roboflow community for making their data available.

## Related Projects

- [DocCornerNet](https://github.com/mapo80/DocCornerNet-CoordClass) - Document corner detection model trained on this dataset
collages/test_collage.jpg
ADDED

collages/train_collage.jpg
ADDED

collages/val_collage.jpg
ADDED
dataset_info.json
ADDED
{
  "description": "Document Corner Detection Dataset - Best generalist training data from doc-scanner-dataset-rev-new with train_clean_iter3_plus_hard_full, val_clean_iter3, and test splits.",
  "citation": "",
  "homepage": "https://huggingface.co/datasets/mapo80/DocCornerDataset",
  "license": "MIT",
  "features": {
    "image": {
      "_type": "Image"
    },
    "filename": {
      "dtype": "string",
      "_type": "Value"
    },
    "is_negative": {
      "dtype": "bool",
      "_type": "Value"
    },
    "corner_tl_x": {
      "dtype": "float32",
      "_type": "Value"
    },
    "corner_tl_y": {
      "dtype": "float32",
      "_type": "Value"
    },
    "corner_tr_x": {
      "dtype": "float32",
      "_type": "Value"
    },
    "corner_tr_y": {
      "dtype": "float32",
      "_type": "Value"
    },
    "corner_br_x": {
      "dtype": "float32",
      "_type": "Value"
    },
    "corner_br_y": {
      "dtype": "float32",
      "_type": "Value"
    },
    "corner_bl_x": {
      "dtype": "float32",
      "_type": "Value"
    },
    "corner_bl_y": {
      "dtype": "float32",
      "_type": "Value"
    }
  },
  "splits": {
    "train": {
      "name": "train",
      "num_examples": 32968
    },
    "validation": {
      "name": "validation",
      "num_examples": 8645
    },
    "test": {
      "name": "test",
      "num_examples": 6652
    }
  },
  "dataset_name": "DocCornerDataset-BestGeneralist",
  "config_name": "default"
}