adalbertojunior committed on
Commit f0c11e5
1 parent: 3f5f87a

Create MININER.py

Files changed (1): MININER.py (+126, -0)
MININER.py ADDED
@@ -0,0 +1,126 @@
# coding=utf-8

import datasets


logger = datasets.logging.get_logger(__name__)

_URL = "https://huggingface.co/datasets/adalbertojunior/MININER/resolve/main/"
_TRAIN_FILE = "train.conll"
_TEST_FILE = "test.conll"


class MININER(datasets.GeneratorBasedBuilder):
    """MININER dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="MININER", version=VERSION, description="MININER dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # pos_tags and chunk_tags declare a single "O" label, so
                    # the raw files are expected to carry "O" placeholders in
                    # those columns.
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                            ]
                        )
                    ),
                    "chunk_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                            ]
                        )
                    ),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PESSOA",
                                "I-PESSOA",
                                "B-TEMPO",
                                "I-TEMPO",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
        )
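
    # Illustration only (not used by the loader): ClassLabel encodes each tag
    # as its index in `names`, so with the ner_tags labels above a sentence
    # tagged ["O", "B-PESSOA", "I-PESSOA"] is stored as [0, 1, 2].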

    def _split_generators(self, dl_manager):
        # URLs of the raw .conll files hosted in this dataset repo. No
        # dedicated dev file is published, so the validation split reuses
        # the training file.
        urls_to_download = {
            "train": f"{_URL}{_TRAIN_FILE}",
            "dev": f"{_URL}{_TRAIN_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }

        # download_and_extract maps each URL to a local cache path.
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"], "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"], "split": "test"},
            ),
        ]
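
    # Expected input format (an assumption, inferred from the parsing in
    # _generate_examples below): one whitespace-separated row per token with
    # the NER tag in the last column, and blank lines separating sentences,
    # e.g.
    #
    #   Maria O O B-PESSOA
    #   Silva O O I-PESSOA
    #   chegou O O O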

    def _generate_examples(self, filepath, split):
        """Yields examples in raw (text) form by iterating over the file."""

        logger.info("⏳ Generating examples from = %s", filepath)

        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []
            chunk_tags = []
            ner_tags = []

            for line in f:
                if line == "" or line == "\n":
                    # Blank line: end of the current sentence.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                            "chunk_tags": chunk_tags,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                        chunk_tags = []
                        ner_tags = []
                else:
                    # Token row: token, POS, chunk, ..., NER (last column).
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    pos_tags.append(splits[1])
                    chunk_tags.append(splits[2])
                    ner_tags.append(splits[-1].rstrip())

            # Last example, if the file does not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                    "chunk_tags": chunk_tags,
                    "ner_tags": ner_tags,
                }
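
For reference, a minimal usage sketch (an illustration, not part of the commit). It assumes this script is consumed as a community loading script from the adalbertojunior/MININER repo, on a recent `datasets` version where script-based datasets require trust_remote_code=True:

from datasets import load_dataset

# Downloads train.conll / test.conll via the script above and builds the splits.
ds = load_dataset("adalbertojunior/MININER", trust_remote_code=True)

print(ds["train"][0]["tokens"])                        # tokens of the first sentence
print(ds["train"].features["ner_tags"].feature.names)  # ["O", "B-PESSOA", ...]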