albertvillanova (HF staff) committed
Commit a503cf4
Parent: e413f33

Convert dataset to Parquet (#3)


- Convert dataset to Parquet (78c90c57c34c341dd1cd1ddf3bb874c7904be2ed)
- Delete loading script (745bd03e6ac83d41bf4c7eb697f741581367da43)
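
With the loading script deleted, the dataset is now served straight from the Parquet file, so `load_dataset` no longer executes repo code. A minimal sketch of loading it (the dataset ID `youtube_caption_corrections` is assumed here; substitute the actual repo ID):

```python
from datasets import load_dataset

# Loads the train split from the Parquet shard; no custom script runs.
# NOTE: the dataset ID below is an assumption, not confirmed by this commit page.
ds = load_dataset("youtube_caption_corrections", split="train")

print(ds)                         # features and num_rows (10769 per the README)
print(ds[0]["default_seq"][:10])  # first tokens of the auto-generated captions
```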

README.md CHANGED
@@ -46,10 +46,15 @@ dataset_info:
           '8': RESERVED_DIFF
   splits:
   - name: train
-    num_bytes: 355978939
+    num_bytes: 355978891
     num_examples: 10769
-  download_size: 222479455
-  dataset_size: 355978939
+  download_size: 49050406
+  dataset_size: 355978891
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
 ---
 
 # Dataset Card for YouTube Caption Corrections
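
The new `configs` block maps the default config's train split to the Parquet shard(s) matching `data/train-*`, and the size fields now reflect the Parquet payload. A hedged way to check the updated metadata without downloading the data (repo ID assumed, as above):

```python
from datasets import load_dataset_builder

# Resolve dataset metadata only; nothing is downloaded.
builder = load_dataset_builder("youtube_caption_corrections")  # assumed repo ID
info = builder.info
print(info.splits["train"].num_examples)  # 10769
print(info.download_size)                 # 49050406 after the Parquet conversion
print(info.dataset_size)                  # 355978891
```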
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2325b0cebcbf84526cb2bdba9e4550b2fac6bc0dc832ef7b4ac08711b1ab6682
+size 49050406
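
The repo itself stores only this Git LFS pointer; the ~49 MB shard lives in LFS storage. A sketch of fetching and inspecting the shard directly (repo ID assumed; `hf_hub_download` resolves the LFS pointer to the real file):

```python
import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Download the actual Parquet shard behind the LFS pointer above.
path = hf_hub_download(
    repo_id="youtube_caption_corrections",  # assumed; use the actual namespace/name
    filename="data/train-00000-of-00001.parquet",
    repo_type="dataset",
)
table = pq.read_table(path)
print(table.num_rows)      # expected: 10769
print(table.schema.names)  # video_ids, default_seq, correction_seq, diff_type
```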
youtube_caption_corrections.py DELETED
@@ -1,104 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Dataset built from <auto-generated, manually corrected> caption pairs of
-YouTube videos with labels capturing the differences between the two."""
-
-
-import json
-
-import datasets
-
-
-_CITATION = ""
-
-_DESCRIPTION = """\
-Dataset built from pairs of YouTube captions where both 'auto-generated' and
-'manually-corrected' captions are available for a single specified language.
-This dataset labels two-way (e.g. ignoring single-sided insertions) same-length
-token differences in the `diff_type` column. The `default_seq` is composed of
-tokens from the 'auto-generated' captions. When a difference occurs between
-the 'auto-generated' vs 'manually-corrected' captions types, the `correction_seq`
-contains tokens from the 'manually-corrected' captions.
-"""
-
-_LICENSE = "MIT License"
-
-_RELEASE_TAG = "v1.0"
-_NUM_FILES = 4
-_URLS = [
-    f"https://raw.githubusercontent.com/2dot71mily/youtube_captions_corrections/{_RELEASE_TAG}/data/transcripts/en/split/youtube_caption_corrections_{i}.json"
-    for i in range(_NUM_FILES)
-]
-
-
-class YoutubeCaptionCorrections(datasets.GeneratorBasedBuilder):
-    """YouTube captions corrections."""
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "video_ids": datasets.Value("string"),
-                    "default_seq": datasets.Sequence(datasets.Value("string")),
-                    "correction_seq": datasets.Sequence(datasets.Value("string")),
-                    "diff_type": datasets.Sequence(
-                        datasets.features.ClassLabel(
-                            names=[
-                                "NO_DIFF",
-                                "CASE_DIFF",
-                                "PUNCUATION_DIFF",
-                                "CASE_AND_PUNCUATION_DIFF",
-                                "STEM_BASED_DIFF",
-                                "DIGIT_DIFF",
-                                "INTRAWORD_PUNC_DIFF",
-                                "UNKNOWN_TYPE_DIFF",
-                                "RESERVED_DIFF",
-                            ]
-                        )
-                    ),
-                }
-            ),
-            supervised_keys=("correction_seq", "diff_type"),
-            homepage="https://github.com/2dot71mily/youtube_captions_corrections",
-            license=_LICENSE,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        downloaded_filepaths = dl_manager.download_and_extract(_URLS)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"filepaths": downloaded_filepaths},
-            ),
-        ]
-
-    def _generate_examples(self, filepaths):
-        """Yields examples."""
-        for file_idx, fp in enumerate(filepaths):
-            with open(fp, "r", encoding="utf-8") as json_file:
-                json_lists = list(json_file)
-            for line_idx, json_list_str in enumerate(json_lists):
-                json_list = json.loads(json_list_str)
-
-                for ctr_idx, result in enumerate(json_list):
-                    response = {
-                        "video_ids": result["video_ids"],
-                        "diff_type": result["diff_type"],
-                        "default_seq": result["default_seq"],
-                        "correction_seq": result["correction_seq"],
-                    }
-                    yield f"{file_idx}_{line_idx}_{ctr_idx}", response
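
The deleted builder read four JSON-lines files from GitHub, where each line holds a list of records. For anyone who still needs the raw source after this commit, a standalone sketch replicating `_generate_examples` (URLs copied verbatim from the deleted script; no `datasets` builder involved):

```python
import json
import urllib.request

# Source files taken verbatim from the deleted script's _URLS.
URLS = [
    "https://raw.githubusercontent.com/2dot71mily/youtube_captions_corrections/"
    f"v1.0/data/transcripts/en/split/youtube_caption_corrections_{i}.json"
    for i in range(4)
]

def iter_examples():
    """Yield (key, record) pairs exactly as the deleted loader did."""
    for file_idx, url in enumerate(URLS):
        with urllib.request.urlopen(url) as resp:
            lines = resp.read().decode("utf-8").splitlines()
        for line_idx, line in enumerate(lines):
            # Each JSON line is itself a list of caption-pair records.
            for ctr_idx, result in enumerate(json.loads(line)):
                yield f"{file_idx}_{line_idx}_{ctr_idx}", {
                    "video_ids": result["video_ids"],
                    "diff_type": result["diff_type"],
                    "default_seq": result["default_seq"],
                    "correction_seq": result["correction_seq"],
                }
```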