{
  "builder_name": "wider_face",
  "citation": "@inproceedings{yang2016wider,\n Author = {Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou},\n Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},\n Title = {WIDER FACE: A Face Detection Benchmark},\n Year = {2016}}\n",
  "config_name": "default",
  "dataset_name": "wider_face",
  "dataset_size": 19064393,
  "description": "WIDER FACE dataset is a face detection benchmark dataset, of which images are\nselected from the publicly available WIDER dataset. We choose 32,203 images and\nlabel 393,703 faces with a high degree of variability in scale, pose and\nocclusion as depicted in the sample images. WIDER FACE dataset is organized\nbased on 61 event classes. For each event class, we randomly select 40%/10%/50%\ndata as training, validation and testing sets. We adopt the same evaluation\nmetric employed in the PASCAL VOC dataset. Similar to MALF and Caltech datasets,\nwe do not release bounding box ground truth for the test images. Users are\nrequired to submit final prediction files, which we shall proceed to evaluate.\n",
  "download_checksums": {
    "https://huggingface.co/datasets/wider_face/resolve/main/data/WIDER_train.zip": {
      "num_bytes": 1465602149,
      "checksum": null
    },
    "https://huggingface.co/datasets/wider_face/resolve/main/data/WIDER_val.zip": {
      "num_bytes": 362752168,
      "checksum": null
    },
    "https://huggingface.co/datasets/wider_face/resolve/main/data/WIDER_test.zip": {
      "num_bytes": 1844140520,
      "checksum": null
    },
    "https://huggingface.co/datasets/wider_face/resolve/main/data/wider_face_split.zip": {
      "num_bytes": 3591642,
      "checksum": null
    }
  },
  "download_size": 3676086479,
  "features": {
    "image": {
      "_type": "Image"
    },
    "faces": {
      "feature": {
        "bbox": {
          "feature": {
            "dtype": "float32",
            "_type": "Value"
          },
          "length": 4,
          "_type": "Sequence"
        },
        "blur": {
          "names": [
            "clear",
            "normal",
            "heavy"
          ],
          "_type": "ClassLabel"
        },
        "expression": {
          "names": [
            "typical",
            "exaggerate"
          ],
          "_type": "ClassLabel"
        },
        "illumination": {
          "names": [
            "normal",
            "exaggerate "
          ],
          "_type": "ClassLabel"
        },
        "occlusion": {
          "names": [
            "no",
            "partial",
            "heavy"
          ],
          "_type": "ClassLabel"
        },
        "pose": {
          "names": [
            "typical",
            "atypical"
          ],
          "_type": "ClassLabel"
        },
        "invalid": {
          "dtype": "bool",
          "_type": "Value"
        }
      },
      "_type": "Sequence"
    }
  },
  "homepage": "http://shuoyang1213.me/WIDERFACE/",
  "license": "Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)",
  "size_in_bytes": 3695150872,
  "splits": {
    "train": {
      "name": "train",
      "num_bytes": 12111854,
      "num_examples": 12880,
      "dataset_name": "wider_face"
    },
    "test": {
      "name": "test",
      "num_bytes": 3938170,
      "num_examples": 16097,
      "dataset_name": "wider_face"
    },
    "validation": {
      "name": "validation",
      "num_bytes": 3014369,
      "num_examples": 3226,
      "dataset_name": "wider_face"
    }
  },
  "version": {
    "version_str": "1.0.0",
    "major": 1,
    "minor": 0,
    "patch": 0
  }
}