adalbertojunior committed on
Commit e1603e9
1 Parent(s): d8a8419

Upload punctuation-ptbr.py

Files changed (1)
  1. punctuation-ptbr.py +124 -0
punctuation-ptbr.py ADDED
@@ -0,0 +1,124 @@
+ # coding=utf-8
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _URL = "https://huggingface.co/datasets/adalbertojunior/punctuation-ptbr/resolve/main/"
+ _TRAIN_FILE = "train.txt"
+ _TEST_FILE = "test.txt"
+ _DEV_FILE = "dev.txt"
+
+
+ class Punctuation(datasets.GeneratorBasedBuilder):
+     """Punctuation restoration dataset for Brazilian Portuguese."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="punctuation-ptbr", version=VERSION, description="punctuation-ptbr dataset"
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     # pos_tags and chunk_tags are placeholders kept for
+                     # CoNLL compatibility: every token is tagged "O".
+                     "pos_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(names=["O"])
+                     ),
+                     "chunk_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(names=["O"])
+                     ),
+                     # The punctuation labels: "O" (no punctuation after the
+                     # token), comma (Virgula), period (Ponto), question mark
+                     # (Interrogacao).
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-Virgula",
+                                 "B-Ponto",
+                                 "B-Interrogacao",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Download the raw text file for each split from the dataset repo.
+         urls_to_download = {
+             "train": f"{_URL}{_TRAIN_FILE}",
+             "dev": f"{_URL}{_DEV_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": downloaded_files["train"], "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": downloaded_files["dev"], "split": "dev"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": downloaded_files["test"], "split": "test"},
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         """Yields examples by iterating over a CoNLL-style text file."""
+
+         logger.info("⏳ Generating examples from = %s", filepath)
+
+         with open(filepath, encoding="utf-8") as f:
+             guid = 0
+             tokens = []
+             pos_tags = []
+             chunk_tags = []
+             ner_tags = []
+
+             for line in f:
+                 if line == "" or line == "\n":
+                     # A blank line ends a sentence: emit the accumulated example.
+                     if tokens:
+                         yield guid, {
+                             "id": str(guid),
+                             "tokens": tokens,
+                             "pos_tags": pos_tags,
+                             "chunk_tags": chunk_tags,
+                             "ner_tags": ner_tags,
+                         }
+                         guid += 1
+                         tokens = []
+                         pos_tags = []
+                         chunk_tags = []
+                         ner_tags = []
+                 else:
+                     # One token per line: "token pos chunk ner", space-separated.
+                     splits = line.split(" ")
+                     tokens.append(splits[0])
+                     pos_tags.append(splits[1])
+                     chunk_tags.append(splits[2])
+                     ner_tags.append(splits[-1].rstrip())
+
+             # Last example; guard against a trailing blank line at end of file.
+             if tokens:
+                 yield guid, {
+                     "id": str(guid),
+                     "tokens": tokens,
+                     "pos_tags": pos_tags,
+                     "chunk_tags": chunk_tags,
+                     "ner_tags": ner_tags,
+                 }
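
For reference, _generate_examples above assumes a four-column, space-separated CoNLL-style layout: one token per line ("token pos chunk ner"), a blank line between sentences, and the punctuation mark encoded as the ner label of the token it follows. A hypothetical fragment of train.txt (illustrative only, not actual dataset contents) would look like:

    Sim O O B-Virgula
    eu O O O
    entendi O O B-Ponto

    Tudo O O O
    bem O O B-Interrogacao

i.e. "Sim, eu entendi. Tudo bem?" ("Yes, I understood. All good?"), with B-Virgula, B-Ponto and B-Interrogacao marking a following comma, period and question mark respectively.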
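Once the script is uploaded, the dataset can be consumed by repo id through the datasets library. A minimal usage sketch, assuming the repo id matches the _URL defined in the script (newer versions of datasets may additionally require trust_remote_code=True for script-based datasets):

    from datasets import load_dataset

    ds = load_dataset("adalbertojunior/punctuation-ptbr")
    print(ds["train"][0]["tokens"])

    # ner_tags are stored as ClassLabel integers; map them back to names.
    label_names = ds["train"].features["ner_tags"].feature.names
    print([label_names[i] for i in ds["train"][0]["ner_tags"]])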