parquet-converter committed on
Commit
e8dc0e0
·
1 Parent(s): f4daca1

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,51 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ftz filter=lfs diff=lfs merge=lfs -text
6
- *.gz filter=lfs diff=lfs merge=lfs -text
7
- *.h5 filter=lfs diff=lfs merge=lfs -text
8
- *.joblib filter=lfs diff=lfs merge=lfs -text
9
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
- *.lz4 filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.npy filter=lfs diff=lfs merge=lfs -text
14
- *.npz filter=lfs diff=lfs merge=lfs -text
15
- *.onnx filter=lfs diff=lfs merge=lfs -text
16
- *.ot filter=lfs diff=lfs merge=lfs -text
17
- *.parquet filter=lfs diff=lfs merge=lfs -text
18
- *.pb filter=lfs diff=lfs merge=lfs -text
19
- *.pickle filter=lfs diff=lfs merge=lfs -text
20
- *.pkl filter=lfs diff=lfs merge=lfs -text
21
- *.pt filter=lfs diff=lfs merge=lfs -text
22
- *.pth filter=lfs diff=lfs merge=lfs -text
23
- *.rar filter=lfs diff=lfs merge=lfs -text
24
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
25
- *.tar.* filter=lfs diff=lfs merge=lfs -text
26
- *.tflite filter=lfs diff=lfs merge=lfs -text
27
- *.tgz filter=lfs diff=lfs merge=lfs -text
28
- *.wasm filter=lfs diff=lfs merge=lfs -text
29
- *.xz filter=lfs diff=lfs merge=lfs -text
30
- *.zip filter=lfs diff=lfs merge=lfs -text
31
- *.zst filter=lfs diff=lfs merge=lfs -text
32
- *tfevents* filter=lfs diff=lfs merge=lfs -text
33
- # Audio files - uncompressed
34
- *.pcm filter=lfs diff=lfs merge=lfs -text
35
- *.sam filter=lfs diff=lfs merge=lfs -text
36
- *.raw filter=lfs diff=lfs merge=lfs -text
37
- # Audio files - compressed
38
- *.aac filter=lfs diff=lfs merge=lfs -text
39
- *.flac filter=lfs diff=lfs merge=lfs -text
40
- *.mp3 filter=lfs diff=lfs merge=lfs -text
41
- *.ogg filter=lfs diff=lfs merge=lfs -text
42
- *.wav filter=lfs diff=lfs merge=lfs -text
43
- # Image files - uncompressed
44
- *.bmp filter=lfs diff=lfs merge=lfs -text
45
- *.gif filter=lfs diff=lfs merge=lfs -text
46
- *.png filter=lfs diff=lfs merge=lfs -text
47
- *.tiff filter=lfs diff=lfs merge=lfs -text
48
- # Image files - compressed
49
- *.jpg filter=lfs diff=lfs merge=lfs -text
50
- *.jpeg filter=lfs diff=lfs merge=lfs -text
51
- *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,3 +0,0 @@
1
- ---
2
- license: bsd
3
- ---
 
 
 
 
person_resume_funsd_format_v5.zip → funsd/layoutlm_resume_data-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:150cec18a8906f8f99ed4c6af4a337adb76d4063fb78c3e34e4c932b38f279c8
3
- size 57748880
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:30535c4fe3ddbbab3fa88c72ff5268b0e3e5d4ad2099ec23f797db8a624dc6bd
3
+ size 15312537
person_resume_funsd_format_v4.zip → funsd/layoutlm_resume_data-train.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8c0ee1d1605d17b1e24685032ecbaddcfd8027224c564ab034d403c8dc9a45cf
3
- size 57750459
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67c6360921fac8de6f6d5ee02874976446d1c6123786755a7885d65c85419dee
3
+ size 61392065
layoutlm_resume_data.py DELETED
@@ -1,148 +0,0 @@
1
- # coding=utf-8
2
-
3
- import json
4
- import os
5
-
6
- from PIL import Image
7
-
8
- import datasets
9
-
10
def load_image(image_path):
    """Open *image_path*, force RGB mode, and return ``(image, (width, height))``."""
    img = Image.open(image_path).convert("RGB")
    width, height = img.size
    return img, (width, height)
14
-
15
def normalize_bbox(bbox, size):
    """Scale an absolute ``[x0, y0, x1, y1]`` pixel box to LayoutLM's 0-1000 grid.

    ``size`` is the ``(width, height)`` of the page image; x-coordinates are
    divided by the width, y-coordinates by the height.
    """
    width, height = size
    # x coords (indices 0, 2) scale by width; y coords (indices 1, 3) by height.
    dims = (width, height, width, height)
    return [int(1000 * bbox[i] / dims[i]) for i in range(4)]
22
-
23
# Module-level logger, namespaced to this dataset script.
logger = datasets.logging.get_logger(__name__)
24
-
25
-
26
-
27
-
28
class ResumeDataConfig(datasets.BuilderConfig):
    """Builder configuration for the resume NER dataset.

    Args:
        **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
37
-
38
-
39
class ResumeData(datasets.GeneratorBasedBuilder):
    """Resume NER dataset in FUNSD layout format.

    Each example is one annotated resume page: its word tokens, per-token
    line-level bounding boxes normalized to a 0-1000 grid, NER tags, and the
    page image. Annotations are JSON files with a FUNSD-style ``"form"`` list;
    images are sibling ``.jpeg`` files with the same stem.
    """

    BUILDER_CONFIGS = [
        ResumeDataConfig(name="funsd",
                         version=datasets.Version("1.0.0"),
                         description="Resume Dataset"),
    ]

    def _info(self):
        """Declare the feature schema: id, tokens, bboxes, ner_tags, image."""
        return datasets.DatasetInfo(
            description="",
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            # BIOES-style tag set over five entity types.
                            # NOTE(review): _generate_examples below only ever
                            # emits "O", "B-..." and "I-..."; the E-/S- labels
                            # are declared but unused by this generator.
                            names=["O",
                                   "B-ADDRESS",
                                   "B-EMAIL",
                                   "B-NAME",
                                   "B-PHONE",
                                   "B-SECTIONHEADER",
                                   "E-ADDRESS",
                                   "E-EMAIL",
                                   "E-NAME",
                                   "E-PHONE",
                                   "E-SECTIONHEADER",
                                   "I-ADDRESS",
                                   "I-EMAIL",
                                   "I-NAME",
                                   "I-PHONE",
                                   "I-SECTIONHEADER",
                                   "S-ADDRESS",
                                   "S-EMAIL",
                                   "S-NAME",
                                   "S-PHONE",
                                   "S-SECTIONHEADER"
                                   ]
                        )
                    ),
                    "image": datasets.features.Image(),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation="",
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and return train/test SplitGenerators."""
        downloaded_file = dl_manager.download_and_extract("https://huggingface.co/datasets/Kunling/layoutlm_resume_data/resolve/main/person_resume_funsd_format_v5.zip")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training/"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing/"}
            ),
        ]

    def get_line_bbox(self, bboxs):
        """Return one shared line-level box per word.

        Collapses a list of word boxes ``[x0, y0, x1, y1]`` into the smallest
        box enclosing all of them, repeated once per input word so the output
        length matches the input length.
        """
        # Even-indexed coordinates are x values, odd-indexed are y values.
        x = [bboxs[i][j] for i in range(len(bboxs)) for j in range(0, len(bboxs[i]), 2)]
        y = [bboxs[i][j] for i in range(len(bboxs)) for j in range(1, len(bboxs[i]), 2)]

        x0, y0, x1, y1 = min(x), min(y), max(x), max(y)

        assert x1 >= x0 and y1 >= y0
        bbox = [[x0, y0, x1, y1] for _ in range(len(bboxs))]
        return bbox

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs from a split directory.

        Expects ``filepath`` to contain ``annotations/`` (JSON) and ``images/``
        (JPEG) subdirectories with matching file stems. Empty-text words are
        skipped; "other"-labelled lines become "O" tags, everything else gets
        BIO tags; all word boxes in a line are replaced by the line's box.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "annotations")
        img_dir = os.path.join(filepath, "images")
        # Sorted for deterministic example ordering across runs/filesystems.
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            tokens = []
            bboxes = []
            ner_tags = []

            file_path = os.path.join(ann_dir, file)
            with open(file_path, "r", encoding="utf8") as f:
                data = json.load(f)
            # Swap only the extension. The previous
            # `image_path.replace("json", "jpeg")` substituted EVERY
            # occurrence of "json" in the full path, corrupting paths whose
            # directories or stems happen to contain "json".
            image_path = os.path.join(img_dir, os.path.splitext(file)[0] + ".jpeg")
            image, size = load_image(image_path)
            for item in data["form"]:
                cur_line_bboxes = []
                words, label = item["words"], item["label"]
                words = [w for w in words if w["text"].strip() != ""]
                if len(words) == 0:
                    continue
                if label.lower() == "other":
                    for w in words:
                        tokens.append(w["text"])
                        ner_tags.append("O")
                        cur_line_bboxes.append(normalize_bbox(w["box"], size))
                else:
                    # First word opens the entity (B-), the rest continue it (I-).
                    tokens.append(words[0]["text"])
                    ner_tags.append("B-" + label.upper())
                    cur_line_bboxes.append(normalize_bbox(words[0]["box"], size))
                    for w in words[1:]:
                        tokens.append(w["text"])
                        ner_tags.append("I-" + label.upper())
                        cur_line_bboxes.append(normalize_bbox(w["box"], size))
                cur_line_bboxes = self.get_line_bbox(cur_line_bboxes)
                bboxes.extend(cur_line_bboxes)
            yield guid, {"id": str(guid), "tokens": tokens, "bboxes": bboxes, "ner_tags": ner_tags,
                         "image": image}