bergoliveira committed on
Commit
61303bf
1 Parent(s): 6f4ec81

Upload pl-corpus.py

Browse files
Files changed (1) hide show
  1. pl-corpus.py +123 -0
pl-corpus.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import datasets


# Module-level logger, obtained through the `datasets` library's logging
# helper so it integrates with the library's verbosity configuration.
logger = datasets.logging.get_logger(__name__)
7
+ _CITATION = """
8
+ ALBUQUERQUE2022,author="Albuquerque, Hidelberg O. and Costa, Rosimeire and Silvestre, Gabriel and Souza, Ellen and da Silva, N{\'a}dia F. F. and Vit{\'o}rio, Douglas and Moriyama, Gyovana and Martins, Lucas and Soezima, Luiza and Nunes, Augusto and Siqueira, Felipe and Tarrega, Jo{\~a}o P. and Beinotti, Joao V. and Dias, Marcio and Silva, Matheus and Gardini, Miguel and Silva, Vinicius and de Carvalho, Andr{\'e} C. P. L. F. and Oliveira, Adriano L. I.", title="{UlyssesNER-Br}: A Corpus of Brazilian Legislative Documents for Named Entity Recognition", booktitle="Computational Processing of the Portuguese Language", year="2022", pages="3--14",@inproceedings{inPress, PROPOR2022}
9
+ """
10
+
11
+ _DESCRIPTION = """
12
+ PL-corpus is a Portuguese language dataset for named entity recognition applied to legislative documents. Its parte of the UlyssesBR-corpus, and consists entirely of manually annotated public bills texts (projetos de leis) and contains tags for persons, locations, date entities, organizations, legal foundation and bills.
13
+ """
14
+
15
+ _HOMEPAGE = "https://github.com/Convenio-Camara-dos-Deputados/ulyssesner-br-propor"
16
+
17
+ _URL = "https://raw.githubusercontent.com/bergoliveira/assessment-of-deep-learning-models-icann/main/pl-corpus/"
18
+ _TRAINING_FILE = "train.conll"
19
+ _DEV_FILE = "dev.conll"
20
+ _TEST_FILE = "test.conll"
21
+
22
+
23
class PlCorpus(datasets.GeneratorBasedBuilder):
    """PL-corpus dataset builder.

    Loads the Brazilian legislative-bill NER corpus from CoNLL-formatted
    split files (one "token tag" pair per line, blank line between
    sentences) and exposes it as token/NER-tag sequence examples.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="pl-corpus", version=VERSION, description="PL-corpus dataset"),
    ]

    def _info(self):
        """Return the dataset metadata: description, features, homepage, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # IOB2 tag set: organizations, persons, dates, locations,
                    # legal foundations, bills (produto de lei) and events.
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-ORGANIZACAO",
                                "I-ORGANIZACAO",
                                "B-PESSOA",
                                "I-PESSOA",
                                "B-DATA",
                                "I-DATA",
                                "B-LOCAL",
                                "I-LOCAL",
                                "B-FUNDAMENTO",
                                "I-FUNDAMENTO",
                                "B-PRODUTODELEI",
                                "I-PRODUTODELEI",
                                "B-EVENTO",
                                "I-EVENTO",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/Convenio-Camara-dos-Deputados/ulyssesner-br-propor",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three CoNLL files and return one SplitGenerator per split."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"], "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"], "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (guid, example) pairs parsed from a CoNLL split file.

        Each non-blank line is expected to be "token tag" separated by a
        single space; a blank line terminates the current sentence.
        """
        logger.info("⏳ Generating examples from = %s", filepath)

        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []

            for line in f:
                if line == "" or line == "\n":
                    # Sentence boundary: flush the accumulated example, if any.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    # rstrip drops the trailing newline from the tag column.
                    ner_tags.append(splits[1].rstrip())

            # Flush the final sentence only if one is pending. The original
            # code yielded unconditionally, emitting a spurious empty example
            # whenever the file ends with a blank line (the usual CoNLL case).
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }