Vadzim Kashko committed
Commit 7f19f5a
1 Parent(s): 58f9176

refactor: all data

.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ data filter=lfs diff=lfs merge=lfs -text
images/01.jpg → data/images.tar.gz RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2a670147613b0dc01ac657ea0584e6cd7d2e7f15ee70d5906dfa9d7424ba22e0
- size 433226
+ oid sha256:57989232c87442f803da5e5123735f6f748b97b8c83670f9a8bb79ca72fa6bf2
+ size 3027445
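
The renamed entry is still a Git LFS pointer; the actual data/images.tar.gz object now bundles the JPEGs that were previously tracked as individual files. A minimal sketch for inspecting the archive locally, assuming it has been materialized with git lfs pull in a checkout of this repository:

import tarfile

# Hypothetical local path; valid only after `git lfs pull` has replaced
# the pointer file with the real archive.
with tarfile.open("data/images.tar.gz", "r:gz") as archive:
    for member in archive.getmembers():
        if member.isfile():
            print(member.name, member.size)  # e.g. 01.jpg and its size in bytes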
data/ocr-trains-dataset.csv ADDED
@@ -0,0 +1,14 @@
+ image_id,image_name,annotations
+ 1,01.jpg,"[{'type': 'box', 'label': 'numbers', 'points': [(1655.38, 519.7), (1851.68, 561.4)], 'attributes': [{'name': 'text', 'text': '61640132'}]}]"
+ 2,02.jpg,"[{'type': 'box', 'label': 'numbers', 'points': [(248.8, 241.23), (352.9, 282.22)], 'attributes': [{'name': 'text', 'text': '55638167'}]}]"
+ 3,03.jpg,"[{'type': 'box', 'label': 'numbers', 'points': [(504.4, 221.5), (600.4, 240.4)], 'attributes': [{'name': 'text', 'text': '95095857'}]}]"
+ 4,04.jpg,"[{'type': 'box', 'label': 'numbers', 'points': [(719.93, 185.34), (828.27, 229.45)], 'attributes': [{'name': 'text', 'text': '63164818'}]}]"
+ 5,05.jpg,"[{'type': 'box', 'label': 'numbers', 'points': [(785.2, 240.1), (1010.1, 284.7)], 'attributes': [{'name': 'text', 'text': '63673149'}]}]"
+ 6,06.jpg,"[{'type': 'box', 'label': 'numbers', 'points': [(483.47, 286.6), (612.27, 322.83)], 'attributes': [{'name': 'text', 'text': '60517067'}]}]"
+ 7,07.jpg,"[{'type': 'box', 'label': 'numbers', 'points': [(295.5, 93.89), (438.09, 133.59)], 'attributes': [{'name': 'text', 'text': '62071246'}]}]"
+ 8,08.jpg,"[{'type': 'box', 'label': 'numbers', 'points': [(218.49, 14.1), (328.19, 48.68)], 'attributes': [{'name': 'text', 'text': '52605870'}]}]"
+ 9,09.jpg,"[{'type': 'box', 'label': 'numbers', 'points': [(142.71, 257.5), (235.2, 276.4)], 'attributes': [{'name': 'text', 'text': '95500344'}]}]"
+ 10,10.jpg,"[{'type': 'box', 'label': 'numbers', 'points': [(189.29, 56.4), (261.72, 78.7)], 'attributes': [{'name': 'text', 'text': '61677258'}]}]"
+ 11,11.jpg,"[{'type': 'box', 'label': 'numbers', 'points': [(389.2, 141.66), (482.4, 162.4)], 'attributes': [{'name': 'text', 'text': '42092288'}]}]"
+ 12,12.jpg,"[{'type': 'box', 'label': 'numbers', 'points': [(132.8, 164.7), (226.1, 183.8)], 'attributes': [{'name': 'text', 'text': '50915024'}]}]"
+ 13,13.jpg,"[{'type': 'box', 'label': 'numbers', 'points': [(495.2, 208.11), (618.1, 234.79)], 'attributes': [{'name': 'text', 'text': '68965877'}]}]"
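
The annotations column stores a Python-literal list of box dictionaries (single quotes, tuple coordinates), so it parses with ast.literal_eval rather than json.loads. A minimal parsing sketch, assuming the CSV has been checked out locally as data/ocr-trains-dataset.csv:

import ast

import pandas as pd

df = pd.read_csv("data/ocr-trains-dataset.csv")

for _, row in df.iterrows():
    # Each annotation entry is a box labelled 'numbers' with the transcribed text attached.
    for box in ast.literal_eval(row["annotations"]):
        (x1, y1), (x2, y2) = box["points"]    # top-left and bottom-right corners
        text = box["attributes"][0]["text"]   # transcribed train number
        print(row["image_name"], box["label"], (x1, y1, x2, y2), text)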
images/02.jpg DELETED

Git LFS Details

  • SHA256: 9b39f5d853c615584520bc8b6404c00f12be6ebecc5bdd479dde3044430c1091
  • Pointer size: 131 Bytes
  • Size of remote file: 233 kB
images/03.jpg DELETED

Git LFS Details

  • SHA256: 40b223ea757eafb78bf8da65f3a95aa8b44d4f5b53f8ae67dd0b3c02f424fc27
  • Pointer size: 131 Bytes
  • Size of remote file: 158 kB
images/04.jpg DELETED

Git LFS Details

  • SHA256: fd59c652942af7be8d78bde5e29a0a6ed86042d19dc1c84c08bf44de0c2c4634
  • Pointer size: 131 Bytes
  • Size of remote file: 600 kB
images/05.jpg DELETED

Git LFS Details

  • SHA256: bdd98c1f632ed10a0af2c5a09ea811460eb8c60ea781a290dae680f1164ebd28
  • Pointer size: 131 Bytes
  • Size of remote file: 423 kB
images/06.jpg DELETED

Git LFS Details

  • SHA256: 3b4b4c7b7fd4bb77e2b7d655caf59516bd286162d9347522978a7007bd5adbe9
  • Pointer size: 131 Bytes
  • Size of remote file: 224 kB
images/07.jpg DELETED

Git LFS Details

  • SHA256: 9110f1c70615c8014b425029b097b052493d8a44ac8a469955658da906c5fd1d
  • Pointer size: 131 Bytes
  • Size of remote file: 217 kB
images/08.jpg DELETED

Git LFS Details

  • SHA256: 4e527bf42d0720254b3f928c4f338aa36a665ba3254d6bcb689482cbf04e55f3
  • Pointer size: 131 Bytes
  • Size of remote file: 143 kB
images/09.jpg DELETED

Git LFS Details

  • SHA256: 201c5208950ee5ab93f7616ac2e079d8684b8cbf72a8d507c0b1fdf3405f933f
  • Pointer size: 131 Bytes
  • Size of remote file: 204 kB
images/10.jpg DELETED

Git LFS Details

  • SHA256: 2480fbb3f2bb41506c0434f2f777d9469c3368658f9e69f4d90175b9075b5593
  • Pointer size: 130 Bytes
  • Size of remote file: 83.7 kB
images/11.jpg DELETED

Git LFS Details

  • SHA256: 3937bb62ce58bfb8ae2b0325e5fc1ba135987821fffad559b54e42e182b4a2d3
  • Pointer size: 131 Bytes
  • Size of remote file: 178 kB
images/12.jpg DELETED

Git LFS Details

  • SHA256: 29c7f664b5eae3fbf7e9be273aa1e43d02173eca41edd40ff452309b9c38760a
  • Pointer size: 131 Bytes
  • Size of remote file: 126 kB
images/13.jpg DELETED

Git LFS Details

  • SHA256: bc450681aff86872e2015c9385518c500198d6c0918f664d862a1d70afa9ecd4
  • Pointer size: 131 Bytes
  • Size of remote file: 128 kB
ocr-trains-dataset.py ADDED
@@ -0,0 +1,71 @@
+ import datasets
+ import pandas as pd
+
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+     title = {ocr-trains-dataset},
+     author = {TrainingDataPro},
+     year = {2023}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The dataset is a collection of images of trains along with bounding box
+ annotations of the car numbers visible in each image. Every box is labelled
+ 'numbers' and carries a 'text' attribute holding the transcribed number, which
+ makes the dataset suitable for text detection and OCR tasks.
+ """
+ _NAME = "ocr-trains-dataset"
+
+ _HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
+
+ _LICENSE = ""
+
+ _DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
+
+
+ class OcrTrainsDataset(datasets.GeneratorBasedBuilder):
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "image": datasets.Image(),
+                     "bboxes": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Download the LFS-backed image archive and the annotation CSV,
+         # then iterate over the archive members without extracting them to disk.
+         images = dl_manager.download(f"{_DATA}images.tar.gz")
+         annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
+         images = dl_manager.iter_archive(images)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "images": images,
+                     "annotations": annotations,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, images, annotations):
+         annotations_df = pd.read_csv(annotations)
+
+         # `image_path` is the member name inside the archive; it is expected
+         # to match the `image_name` column of the CSV.
+         for idx, (image_path, image) in enumerate(images):
+             yield idx, {
+                 "id": annotations_df.loc[annotations_df["image_name"] == image_path][
+                     "image_id"
+                 ].values[0],
+                 "image": {"path": image_path, "bytes": image.read()},
+                 "bboxes": annotations_df["annotations"].iloc[idx],
+             }
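
With the script and the data files in place on the Hub, the dataset should load through the standard datasets entry point; a minimal usage sketch (the repository id comes from _HOMEPAGE above, and newer versions of the datasets library may additionally require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# Loads the single TRAIN split defined by OcrTrainsDataset above.
ds = load_dataset("TrainingDataPro/ocr-trains-dataset", split="train")

example = ds[0]
print(example["id"])      # image_id from the CSV
print(example["bboxes"])  # raw annotation string; parse with ast.literal_eval
image = example["image"]  # decoded as a PIL image by the datasets.Image() feature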