Quentin Lhoest committed
Commit 3c425c3
1 Parent(s): 7ac8c04

Release: 1.18.1


Commit from https://github.com/huggingface/datasets/commit/218e496519ff14b4bc69ea559616af6f2ef89e57

Files changed (1)
  1. wiki_summary.py +85 -85
wiki_summary.py CHANGED
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Wiki Summary."""


import csv

import datasets


_DESCRIPTION = """\
The dataset was extracted from Persian Wikipedia in the form of articles and highlights. The data was cleaned into pairs of articles and highlights, and the articles' length (only in version 1.0.0) and the highlights' length were reduced to a maximum of 512 and 128, respectively, to make it suitable for parsBERT.
"""

_CITATION = """\
@misc{Bert2BertWikiSummaryPersian,
  author = {Mehrdad Farahani},
  title = {Summarization using Bert2Bert model on WikiSummary dataset},
  year = {2020},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished = {https://github.com/m3hrdadfi/wiki-summary},
}
"""

_ID = "id"
_LINK = "link"
_TITLE = "title"
_ARTICLE = "article"
_HIGHLIGHTS = "highlights"

_HOMEPAGE = "https://github.com/m3hrdadfi/wiki-summary"

_TRAIN_DOWNLOAD_URL = "https://drive.google.com/u/0/uc?id=1-CaP3xHgZxOGjQ3pXC5tr9YnIajmel-t&export=download"
_TEST_DOWNLOAD_URL = "https://drive.google.com/u/0/uc?id=1-9G4yYP6YO8oMA-o4cTe9NJpEyr7x5jg&export=download"
_DEV_DOWNLOAD_URL = "https://drive.google.com/u/0/uc?id=1-2g2gkDeNaN-vth-8Mgit_ovmSkVh91u&export=download"


class WikiSummary(datasets.GeneratorBasedBuilder):
    """Wiki Summary"""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        # All five columns are plain strings.
        feature_names = [_ID, _LINK, _TITLE, _ARTICLE, _HIGHLIGHTS]
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({k: datasets.Value("string") for k in feature_names}),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download each split's data file from Google Drive.
        path_train = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
        path_test = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
        path_dev = dl_manager.download_and_extract(_DEV_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": path_train}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": path_test}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": path_dev}),
        ]

    def _generate_examples(self, filepath):
        """Generate Wiki summary examples."""
        with open(filepath, encoding="utf8") as csv_file:
            # Rows are tab-separated: id, link, title, article, highlights.
            csv_reader = csv.reader(
                csv_file, quotechar='"', delimiter="\t", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            for id_, row in enumerate(csv_reader):
                # Skip malformed rows that do not have exactly five fields.
                if len(row) == 5:
                    yield id_, {_ID: row[0], _LINK: row[1], _TITLE: row[2], _ARTICLE: row[3], _HIGHLIGHTS: row[4]}
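
For reference, a minimal usage sketch of this builder, assuming the script is published on the Hugging Face Hub under the name "wiki_summary"; the split names and feature columns below come straight from the builder above:

import datasets

# Assumption: the script above is available on the Hub as "wiki_summary".
dataset = datasets.load_dataset("wiki_summary")

# The builder defines three splits: train, test, and validation.
print(dataset)

# Each example is a dict of five string fields: id, link, title, article, highlights.
example = dataset["train"][0]
print(example["title"])
print(example["highlights"])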