Datasets:

Languages:
English
ArXiv:
License:
mtrem122Marc committed on
Commit
3f1f507
1 Parent(s): a3e27aa

Delete loading script

Browse files
Files changed (1) hide show
  1. samsum.py +0 -112
samsum.py DELETED
@@ -1,112 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """SAMSum dataset."""
16
-
17
-
18
- import json
19
-
20
- import py7zr
21
-
22
- import datasets
23
-
24
-
25
# BibTeX entry for the SAMSum paper (Gliwa et al., 2019) — surfaced via DatasetInfo.
_CITATION = """
@article{gliwa2019samsum,
title={SAMSum Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization},
author={Gliwa, Bogdan and Mochol, Iwona and Biesek, Maciej and Wawer, Aleksander},
journal={arXiv preprint arXiv:1911.12237},
year={2019}
}
"""

# Short human-readable description shown on the dataset page.
_DESCRIPTION = """
SAMSum Corpus contains over 16k chat dialogues with manually annotated
summaries.
There are two features:
- dialogue: text of dialogue.
- summary: human written summary of the dialogue.
- id: id of a example.
"""

# Paper landing page doubles as the dataset homepage.
_HOMEPAGE = "https://arxiv.org/abs/1911.12237"

_LICENSE = "CC BY-NC-ND 4.0"

# 7z archive containing train.json / test.json / val.json members.
_URL = "https://huggingface.co/datasets/samsum/resolve/main/data/corpus.7z"
50
class Samsum(datasets.GeneratorBasedBuilder):
    """SAMSum Corpus dataset builder.

    Produces dialogue/summary pairs from a downloaded 7z archive whose
    members are ``train.json``, ``test.json`` and ``val.json``.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="samsum"),
    ]

    def _info(self):
        """Return dataset metadata: string features plus homepage/license/citation."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "dialogue": datasets.Value("string"),
                "summary": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archive once and declare the three splits.

        Each split reads a different JSON member out of the same local
        archive path, so the download is shared.
        """
        path = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": (path, member),
                    "split": tag,
                },
            )
            for split_name, member, tag in (
                (datasets.Split.TRAIN, "train.json", "train"),
                (datasets.Split.TEST, "test.json", "test"),
                (datasets.Split.VALIDATION, "val.json", "val"),
            )
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(id, example)`` pairs from one JSON member of the archive.

        Args:
            filepath: ``(archive_path, member_name)`` tuple from
                ``_split_generators``.
            split: split tag ("train"/"test"/"val"); unused here but part of
                the gen_kwargs contract.

        Fix over the original: use ``z.read(targets=[fname])`` to extract
        only the requested member, instead of ``z.readall()`` which
        decompressed every split's file into memory on each call.
        """
        path, fname = filepath
        with open(path, "rb") as f:
            with py7zr.SevenZipFile(f, "r") as z:
                # Mapping {member_name: BytesIO} containing only the target.
                extracted = z.read(targets=[fname])
        bio = extracted.get(fname) if extracted else None
        # Preserve original behavior: a missing member yields no examples.
        if bio is not None:
            data = json.load(bio)
            for example in data:
                yield example["id"], example