adalbertojunior committed
Commit ebcb1f6
1 Parent(s): 039dfd1

Create segmentacao2.py

Files changed (1)
  1. segmentacao2.py +108 -0
segmentacao2.py ADDED
@@ -0,0 +1,108 @@
+ # coding=utf-8
+ """Loading script for the segmentacao2 dataset (adapted from a HAREM loader)."""
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _URL = "https://huggingface.co/datasets/adalbertojunior/segmentacao2/resolve/main/"
+ _TRAIN_FILE = "train.conll"
+ _TEST_FILE = "test.conll"
+
+
+ class Harem(datasets.GeneratorBasedBuilder):
+     """Segmentation dataset; the class name is kept from the HAREM script this loader was adapted from."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="segmentacao", version=VERSION, description="segmentacao dataset"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     # The POS and chunk columns carry no information in this dataset: every tag is "O".
+                     "pos_tags": datasets.Sequence(datasets.features.ClassLabel(names=["O"])),
+                     "chunk_tags": datasets.Sequence(datasets.features.ClassLabel(names=["O"])),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(names=["B-Segmento", "I-Segmento"])
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+         )
+
+     def _split_generators(self, dl_manager):
+         # No separate validation file exists, so the dev split reuses the test file.
+         urls_to_download = {
+             "train": f"{_URL}{_TRAIN_FILE}",
+             "dev": f"{_URL}{_TEST_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": downloaded_files["train"], "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": downloaded_files["dev"], "split": "dev"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": downloaded_files["test"], "split": "test"},
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         """Yields examples by reading the space-separated CoNLL file sentence by sentence."""
+         logger.info("⏳ Generating examples from = %s", filepath)
+
+         with open(filepath, encoding="utf-8") as f:
+             guid = 0
+             tokens = []
+             pos_tags = []
+             chunk_tags = []
+             ner_tags = []
+
+             for line in f:
+                 if line == "" or line == "\n":
+                     # A blank line marks the end of a sentence.
+                     if tokens:
+                         yield guid, {
+                             "id": str(guid),
+                             "tokens": tokens,
+                             "pos_tags": pos_tags,
+                             "chunk_tags": chunk_tags,
+                             "ner_tags": ner_tags,
+                         }
+                         guid += 1
+                         tokens = []
+                         pos_tags = []
+                         chunk_tags = []
+                         ner_tags = []
+                 else:
+                     # Columns are space-separated: token, POS, chunk, ..., NER tag last.
+                     splits = line.split(" ")
+                     tokens.append(splits[0])
+                     pos_tags.append(splits[1])
+                     chunk_tags.append(splits[2])
+                     ner_tags.append(splits[-1].rstrip())
+
+             # Yield the last sentence if the file does not end with a blank line.
+             if tokens:
+                 yield guid, {
+                     "id": str(guid),
+                     "tokens": tokens,
+                     "pos_tags": pos_tags,
+                     "chunk_tags": chunk_tags,
+                     "ner_tags": ner_tags,
+                 }
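
For reference, the parser above implies a fixed file layout: each non-blank line holds space-separated columns (token, POS tag, chunk tag, with the NER tag last), and a blank line closes a sentence. The sketch below runs two invented sentences through the same splitting logic to show what train.conll is expected to look like; every token and tag in it is hypothetical, not taken from the dataset:

    # Standalone sketch of the CoNLL layout _generate_examples expects.
    # All tokens and tags below are hypothetical illustration data.
    sample = (
        "Primeira O O B-Segmento\n"
        "frase O O I-Segmento\n"
        "\n"
        "Segunda O O B-Segmento\n"
        "frase O O I-Segmento\n"
    )

    tokens, ner_tags = [], []
    for line in sample.splitlines(keepends=True):
        if line == "\n":  # blank line: sentence boundary
            print(tokens, ner_tags)
            tokens, ner_tags = [], []
        else:
            splits = line.split(" ")
            tokens.append(splits[0])              # first column: token
            ner_tags.append(splits[-1].rstrip())  # last column: NER tag
    if tokens:  # flush a final sentence, as the loader does
        print(tokens, ner_tags)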
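
Once this script and the .conll files are in the repository, the dataset can be consumed as usual. A minimal loading sketch, assuming the repository is public (recent versions of datasets may additionally require trust_remote_code=True for script-based datasets):

    from datasets import load_dataset

    dataset = load_dataset("adalbertojunior/segmentacao2")

    # Each example holds parallel token/tag sequences.
    example = dataset["train"][0]
    print(example["tokens"][:5])
    print(example["ner_tags"][:5])  # integer indices into ["B-Segmento", "I-Segmento"]

    # Map indices back to string labels via the ClassLabel feature.
    ner_feature = dataset["train"].features["ner_tags"].feature
    print([ner_feature.int2str(tag) for tag in example["ner_tags"][:5]])

Note that the validation split will mirror the test split, since _split_generators points both at test.conll.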