parquet-converter committed
Commit f6e97b8 • 1 Parent(s): eccff1c

Update parquet files

.gitattributes CHANGED
@@ -15,3 +15,8 @@
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
  *.gz filter=lfs diff=lfs merge=lfs -text
+ default/cnn_dailymail_nl-train-00001-of-00003.parquet filter=lfs diff=lfs merge=lfs -text
+ default/cnn_dailymail_nl-train-00002-of-00003.parquet filter=lfs diff=lfs merge=lfs -text
+ default/cnn_dailymail_nl-train-00000-of-00003.parquet filter=lfs diff=lfs merge=lfs -text
+ default/cnn_dailymail_nl-validation.parquet filter=lfs diff=lfs merge=lfs -text
+ default/cnn_dailymail_nl-test.parquet filter=lfs diff=lfs merge=lfs -text
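For context, each added line pins one of the new parquet shards to Git LFS, alongside the existing `*.pt`, `*.pth`, and `*.gz` rules. A rough Python sketch of the matching idea (fnmatch globbing only approximates git's attribute rules; all patterns come from the hunk above):

```python
from fnmatch import fnmatch

# Patterns from the .gitattributes hunk above. Git's attribute matching
# differs from fnmatch in edge cases (e.g. "*" does not cross "/" in git),
# so this is only an illustration.
LFS_PATTERNS = [
    "*.pt",
    "*.pth",
    "*.gz",
    "default/cnn_dailymail_nl-train-00000-of-00003.parquet",
    "default/cnn_dailymail_nl-validation.parquet",
    "default/cnn_dailymail_nl-test.parquet",
]

def is_lfs_tracked(path: str) -> bool:
    """Return True if the path matches any LFS-tracked pattern."""
    return any(fnmatch(path, pattern) for pattern in LFS_PATTERNS)

print(is_lfs_tracked("default/cnn_dailymail_nl-test.parquet"))  # True
```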
README.md DELETED
@@ -1,60 +0,0 @@
- ---
- annotations_creators:
- - no-annotation
- language_creators:
- - found
- language:
- - nl
- license:
- - mit
- multilinguality:
- - monolingual
- size_categories:
- - 100K<n<1M
- source_datasets:
- - https://github.com/huggingface/datasets/tree/master/datasets/cnn_dailymail
- task_categories:
- - conditional-text-generation
- task_ids:
- - summarization
- ---
- # Dataset Card for Dutch CNN Dailymail Dataset
-
- ## Dataset Description
-
- - **Repository:** [CNN / DailyMail Dataset NL repository](https://huggingface.co/datasets/ml6team/cnn_dailymail_nl)
-
- ### Dataset Summary
-
- The Dutch CNN / DailyMail Dataset is a machine-translated version of the English CNN / DailyMail dataset, containing just over 300k unique news articles written by journalists at CNN and the Daily Mail.
-
- Most information about the dataset can be found on the [HuggingFace page](https://huggingface.co/datasets/cnn_dailymail) of the original English version.
-
- These are the basic steps used to create this dataset (plus some chunking):
- ```
- load_dataset("cnn_dailymail", '3.0.0')
- ```
- And this is the HuggingFace translation pipeline:
- ```
- pipeline(
-     task='translation_en_to_nl',
-     model='Helsinki-NLP/opus-mt-en-nl',
-     tokenizer='Helsinki-NLP/opus-mt-en-nl')
- ```
-
- ### Data Fields
-
- - `id`: a string containing the hexadecimal-formatted SHA1 hash of the URL the story was retrieved from
- - `article`: a string containing the body of the news article
- - `highlights`: a string containing the highlights of the article as written by the article author
-
- ### Data Splits
-
- The Dutch CNN/DailyMail dataset follows the same splits as the original English version and has 3 splits: _train_, _validation_, and _test_.
-
- | Dataset Split | Number of Instances in Split |
- | ------------- | ---------------------------- |
- | Train         | 287,113                      |
- | Validation    | 13,368                       |
- | Test          | 11,490                       |
-
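Even though this commit deletes the card, the dataset itself stays loadable from the Hub; a minimal sketch, assuming `datasets.load_dataset` resolves the parquet shards committed below:

```python
from datasets import load_dataset

# Split sizes should match the table above (287,113 / 13,368 / 11,490).
ds = load_dataset("ml6team/cnn_dailymail_nl")
print(ds)
print(ds["train"][0]["highlights"])
```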
cnn_dailymail_nl.py DELETED
@@ -1,136 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """CNN/Dailymail Dutch summarization dataset."""
-
-
- import csv
-
- import datasets
-
-
- _DESCRIPTION = """\
- This dataset is the CNN/Dailymail dataset translated to Dutch.
- This is the original dataset:
- ```
- load_dataset("cnn_dailymail", '3.0.0')
- ```
- And this is the HuggingFace translation pipeline:
- ```
- pipeline(
-     task='translation_en_to_nl',
-     model='Helsinki-NLP/opus-mt-en-nl',
-     tokenizer='Helsinki-NLP/opus-mt-en-nl')
- ```
- """
-
- # The second citation introduces the source data, while the first
- # introduces the specific form (non-anonymized) we use here.
- _CITATION = """\
- @article{DBLP:journals/corr/SeeLM17,
-   author    = {Abigail See and
-                Peter J. Liu and
-                Christopher D. Manning},
-   title     = {Get To The Point: Summarization with Pointer-Generator Networks},
-   journal   = {CoRR},
-   volume    = {abs/1704.04368},
-   year      = {2017},
-   url       = {http://arxiv.org/abs/1704.04368},
-   archivePrefix = {arXiv},
-   eprint    = {1704.04368},
-   timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},
-   biburl    = {https://dblp.org/rec/bib/journals/corr/SeeLM17},
-   bibsource = {dblp computer science bibliography, https://dblp.org}
- }
-
- @inproceedings{hermann2015teaching,
-   title={Teaching machines to read and comprehend},
-   author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},
-   booktitle={Advances in neural information processing systems},
-   pages={1693--1701},
-   year={2015}
- }
- """
-
-
- _TRAIN_DOWNLOAD_URLS = [
-     "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_train_000000000000.csv.gz",
-     "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_train_000000000001.csv.gz",
-     "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_train_000000000002.csv.gz",
- ]
- _VALIDATION_DOWNLOAD_URL = "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_validation.csv.gz"
- _TEST_DOWNLOAD_URL = "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_test.csv.gz"
-
-
- _ID = "id"
- _HIGHLIGHTS = "highlights"
- _ARTICLE = "article"
-
-
- class CnnDailymailNl(datasets.GeneratorBasedBuilder):
-     """CNN/Dailymail Dutch summarization dataset."""
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     _ARTICLE: datasets.Value("string"),
-                     _HIGHLIGHTS: datasets.Value("string"),
-                     _ID: datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://huggingface.co/datasets/ml6team/cnn_dailymail_nl",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         train_paths = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URLS)
-         validation_path = dl_manager.download_and_extract(_VALIDATION_DOWNLOAD_URL)
-         test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_paths}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"filepaths": [validation_path]},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST, gen_kwargs={"filepaths": [test_path]}
-             ),
-         ]
-
-     def _generate_examples(self, filepaths):
-         """Generate Dutch CNN/Dailymail examples."""
-         for filepath in filepaths:  # training data is divided over multiple shards
-             with open(filepath, encoding="utf-8") as csv_file:
-                 csv_reader = csv.reader(
-                     csv_file,
-                     quotechar='"',
-                     delimiter=",",
-                     quoting=csv.QUOTE_ALL,
-                     skipinitialspace=True,
-                 )
-                 next(csv_reader)  # skip header
-                 for row in csv_reader:
-                     article_id, article, highlights = row
-                     yield article_id, {
-                         _ARTICLE: article,
-                         _HIGHLIGHTS: highlights,
-                         _ID: article_id,
-                     }
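With the loader script deleted, the converted shards can also be read directly; a minimal sketch assuming a local checkout of this repository and the pandas library (shard name from the renames below, column names from the deleted script):

```python
import pandas as pd

# Hypothetical local path into a checkout of this dataset repository.
df = pd.read_parquet("default/cnn_dailymail_nl-test.parquet")
print(len(df))             # expected: 11,490 rows for the test split
print(sorted(df.columns))  # expected: ['article', 'highlights', 'id']
```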
dataset_infos.json DELETED
@@ -1,83 +0,0 @@
- {
-   "default": {
-     "description": " This dataset is the CNN/Dailymail dataset translated to Dutch.\n This is the original dataset:\n ```\n load_dataset(\"cnn_dailymail\", '3.0.0')\n ```\n And this is the HuggingFace translation pipeline: \n ```\n pipeline(\n task='translation_en_to_nl',\n model='Helsinki-NLP/opus-mt-en-nl',\n tokenizer='Helsinki-NLP/opus-mt-en-nl')\n ```\n",
-     "citation": "@article{DBLP:journals/corr/SeeLM17,\n author = {Abigail See and\n Peter J. Liu and\n Christopher D. Manning},\n title = {Get To The Point: Summarization with Pointer-Generator Networks},\n journal = {CoRR},\n volume = {abs/1704.04368},\n year = {2017},\n url = {http://arxiv.org/abs/1704.04368},\n archivePrefix = {arXiv},\n eprint = {1704.04368},\n timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/SeeLM17},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n\n@inproceedings{hermann2015teaching,\n title={Teaching machines to read and comprehend},\n author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},\n booktitle={Advances in neural information processing systems},\n pages={1693--1701},\n year={2015}\n}\n",
-     "homepage": "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl",
-     "license": "",
-     "features": {
-       "article": {
-         "dtype": "string",
-         "id": null,
-         "_type": "Value"
-       },
-       "highlights": {
-         "dtype": "string",
-         "id": null,
-         "_type": "Value"
-       },
-       "id": {
-         "dtype": "string",
-         "id": null,
-         "_type": "Value"
-       }
-     },
-     "post_processed": null,
-     "supervised_keys": null,
-     "task_templates": null,
-     "builder_name": "cnn_dailymail_nl",
-     "config_name": "default",
-     "version": {
-       "version_str": "0.0.0",
-       "description": null,
-       "major": 0,
-       "minor": 0,
-       "patch": 0
-     },
-     "splits": {
-       "train": {
-         "name": "train",
-         "num_bytes": 1354614404,
-         "num_examples": 287113,
-         "dataset_name": "cnn_dailymail_nl"
-       },
-       "validation": {
-         "name": "validation",
-         "num_bytes": 61857303,
-         "num_examples": 13368,
-         "dataset_name": "cnn_dailymail_nl"
-       },
-       "test": {
-         "name": "test",
-         "num_bytes": 53509170,
-         "num_examples": 11490,
-         "dataset_name": "cnn_dailymail_nl"
-       }
-     },
-     "download_checksums": {
-       "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_train_000000000000.csv.gz": {
-         "num_bytes": 169090535,
-         "checksum": "96f60dd1d201f0c993114da364286fe36cf9ae49ad52ca856d25b0de7186389c"
-       },
-       "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_train_000000000001.csv.gz": {
-         "num_bytes": 169931930,
-         "checksum": "37e4080fa85c445cf42e263492d3070c60584711a311d90fbc5411ad8f700c8d"
-       },
-       "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_train_000000000002.csv.gz": {
-         "num_bytes": 168246772,
-         "checksum": "04423cfa8f773738c0f19d6fa696ab82df5138bdb77c73fcb48fa8528cca7d55"
-       },
-       "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_validation.csv.gz": {
-         "num_bytes": 22598465,
-         "checksum": "46bbe9a744a6a7c7e3bb47558f475b4a11db9aa25392ea9f023e3918653dbda5"
-       },
-       "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_test.csv.gz": {
-         "num_bytes": 19612656,
-         "checksum": "e092959ed03526b568a8281b8ca9109a451df40a1bf29c92aa27b20ec7ad0dff"
-       }
-     },
-     "download_size": 549480358,
-     "post_processing_size": null,
-     "dataset_size": 1469980877,
-     "size_in_bytes": 2019461235
-   }
- }
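The `checksum` values in `download_checksums` above are plain SHA-256 digests of the downloaded files; a minimal sketch of reproducing one (the `sha256_of` helper is hypothetical, and the path assumes a local copy of the old CSV release):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file in chunks and return its hex SHA-256 digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local copy of one of the old shards:
# sha256_of("cnn_dailymail_nl_test.csv.gz")
# -> "e092959ed03526b568a8281b8ca9109a451df40a1bf29c92aa27b20ec7ad0dff"
```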
cnn_dailymail_nl_validation.csv.gz → default/cnn_dailymail_nl-test.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:46bbe9a744a6a7c7e3bb47558f475b4a11db9aa25392ea9f023e3918653dbda5
- size 22598465
+ oid sha256:096a154ddbcf2662fff6c0e0f97c721edce8a181597192556d3db51fadccde93
+ size 31998670
cnn_dailymail_nl_train_000000000001.csv.gz → default/cnn_dailymail_nl-train-00000-of-00003.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:37e4080fa85c445cf42e263492d3070c60584711a311d90fbc5411ad8f700c8d
- size 169931930
+ oid sha256:af4f8540056cab357236c750014a8be5a09f34680fad7a06ebf394398278da23
+ size 307570982
cnn_dailymail_nl_train_000000000002.csv.gz → default/cnn_dailymail_nl-train-00001-of-00003.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:04423cfa8f773738c0f19d6fa696ab82df5138bdb77c73fcb48fa8528cca7d55
- size 168246772
+ oid sha256:7e1e11a2ce3c7e2b74927098aeae0b1f6dba28e322fad919835aa1ae9fe88824
+ size 305501956
cnn_dailymail_nl_train_000000000000.csv.gz → default/cnn_dailymail_nl-train-00002-of-00003.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:96f60dd1d201f0c993114da364286fe36cf9ae49ad52ca856d25b0de7186389c
- size 169090535
+ oid sha256:2c92df823fa2eb42dda0bbe551197d89139c2f580b890088ef074474956000e3
+ size 212686763
cnn_dailymail_nl_test.csv.gz → default/cnn_dailymail_nl-validation.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e092959ed03526b568a8281b8ca9109a451df40a1bf29c92aa27b20ec7ad0dff
- size 19612656
+ oid sha256:899b8a222eb316a1006ca7af2cad7de479687fd62e9f9abb6f7fb73db72df808
+ size 36980738
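The `version`/`oid`/`size` triples in the renamed files above are Git LFS pointer files, not the parquet data itself. A minimal sketch of parsing one (the `parse_lfs_pointer` helper is hypothetical):

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse a git-lfs pointer file (version / oid / size lines) into a dict."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),  # bare hex digest
        "size": int(fields["size"]),                   # size of the real blob in bytes
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:096a154ddbcf2662fff6c0e0f97c721edce8a181597192556d3db51fadccde93
size 31998670
"""
print(parse_lfs_pointer(pointer))
```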
dummy/0.0.0/dummy_data.zip DELETED
Binary file (35.9 kB)