phpaiola committed
Commit dcf838a
1 Parent(s): b5f0b1a

Upload 4 files

Files changed (5)
  1. .gitattributes +3 -0
  2. recognasumm.py +93 -0
  3. test.jsonl +3 -0
  4. train.jsonl +3 -0
  5. validation.jsonl +3 -0
.gitattributes CHANGED
@@ -59,3 +59,6 @@ validation.xlsx filter=lfs diff=lfs merge=lfs -text
  test.json filter=lfs diff=lfs merge=lfs -text
  train.json filter=lfs diff=lfs merge=lfs -text
  validation.json filter=lfs diff=lfs merge=lfs -text
+ test.jsonl filter=lfs diff=lfs merge=lfs -text
+ train.jsonl filter=lfs diff=lfs merge=lfs -text
+ validation.jsonl filter=lfs diff=lfs merge=lfs -text
recognasumm.py ADDED
@@ -0,0 +1,104 @@
+ import json
+
+ import datasets
+
+ _CITATION = """\
+ Coming soon
+ """
+
+ _DESCRIPTION = """\
+ RecognaSumm is a novel and comprehensive database specifically designed for the task of automatic text summarization in Portuguese. RecognaSumm stands out due to its diverse origin, composed of news collected from a variety of information sources, including agencies and online news portals. The database was constructed using web scraping techniques and careful curation, resulting in a rich and representative collection of documents covering various topics and journalistic styles. The creation of RecognaSumm aims to fill a significant void in Portuguese-language summarization research, providing a training and evaluation foundation that can be used for the development and enhancement of automated summarization models.
+ """
+
+ _HOMEPAGE = ""
+
+ _LICENSE = "mit"
+
+ class RecognaSumm(datasets.GeneratorBasedBuilder):
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="default", version=VERSION, description="Default setup of the dataset"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "default"
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "index": datasets.Value("int32"),
+                 "Titulo": datasets.Value("string"),
+                 "Subtitulo": datasets.Value("string"),
+                 "Noticia": datasets.Value("string"),
+                 "Categoria": datasets.Value("string"),
+                 "Autor": datasets.Value("string"),
+                 "Data": datasets.Value("string"),
+                 "URL": datasets.Value("string"),
+                 "Autor_corrigido": datasets.Value("string"),
+                 "Sumario": datasets.Value("string"),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Resolve the JSONL files shipped with the dataset repository to local cached paths.
+         data_files = dl_manager.download(
+             {
+                 "train": "train.jsonl",
+                 "validation": "validation.jsonl",
+                 "test": "test.jsonl",
+             }
+         )
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_files["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_files["validation"],
+                     "split": "validation",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_files["test"],
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         with open(filepath, encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 data = json.loads(row)
+                 # Yield every declared feature; fields missing from a record default to None.
+                 yield key, {
+                     "index": data["index"],
+                     "Titulo": data.get("Titulo"),
+                     "Subtitulo": data.get("Subtitulo"),
+                     "Noticia": data["Noticia"],
+                     "Categoria": data.get("Categoria"),
+                     "Autor": data.get("Autor"),
+                     "Data": data.get("Data"),
+                     "URL": data.get("URL"),
+                     "Autor_corrigido": data.get("Autor_corrigido"),
+                     "Sumario": data["Sumario"],
+                 }
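Once this script and the three JSONL splits are in the repository, the dataset can be loaded through the datasets library. A minimal usage sketch, assuming the hypothetical Hub id recogna-nlp/recognasumm (the actual repository path may differ) and a datasets version recent enough to require trust_remote_code for script-based datasets:

from datasets import load_dataset

# Hypothetical repository id; replace with the dataset's actual Hub path.
ds = load_dataset("recogna-nlp/recognasumm", trust_remote_code=True)

print(ds)                         # DatasetDict with train/validation/test splits
print(ds["train"][0]["Sumario"])  # reference summary of the first training article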
test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c3b747956f856da20925156b7aa8a1d54b7091d3c24382076464fd4b957e4c1
+ size 95999004
train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:925f2ab858ccbe99bfa4e5d7e7ea7adafb4d75a5b0498d6f90ac4cffd5233bc9
+ size 288582909
validation.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24baaf963b74205ea9633e74f2efd015a906365e3de2e7c3d531cc64e54ef8e7
+ size 95284358
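The three .jsonl entries above are Git LFS pointer files rather than the data itself: each records the LFS spec version, the sha256 oid of the payload, and its size in bytes, while the .gitattributes rules added in this commit tell Git to store .jsonl content through LFS. A small Python sketch, assuming hypothetical local paths, that parses such a pointer and verifies a resolved file against it:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path):
    # Read "key value" lines (version, oid, size) from a Git LFS pointer file.
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_payload(pointer_path, payload_path):
    # Compare the resolved file's size and sha256 digest with the pointer's oid/size.
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(payload_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Hypothetical paths: the pointer as committed vs. the LFS-resolved download.
print(verify_payload("train.jsonl.pointer", "train.jsonl"))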