import datasets
import json

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author = {huggingface, Inc.},
year = {2021}
}
"""

_DESCRIPTION = """\
This is a YouTube video transcription dataset built from the YTTTS Speech Collection for semantic search.
"""

_HOMEPAGE = "https://huggingface.co/datasets/ashraq/youtube-transcription"

_LICENSE = ""

TRAIN_URL = "yt-transcriptions.jsonl"
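
# A hypothetical example of one record in yt-transcriptions.jsonl (the field
# values below are illustrative only, not taken from the real data); each line
# of the file is one JSON object whose keys match the features declared in
# _info below:
#
# {"video_id": "dQw4w9WgXcQ",
#  "title": "Example talk",
#  "text": "transcribed segment text",
#  "start_timestamp": "00:00:05",
#  "end_timestamp": "00:00:12",
#  "start_second": "5",
#  "end_second": "12",
#  "url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ&t=5",
#  "thumbnail": "https://img.youtube.com/vi/dQw4w9WgXcQ/0.jpg"}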


class YoutubeTranscription(datasets.GeneratorBasedBuilder):
    """YouTube transcription segments from the YTTTS Speech Collection."""

    VERSION = datasets.Version("1.2.0")

    def _info(self):
        # Declare the dataset schema; every field is exposed as a string.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "video_id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "start_timestamp": datasets.Value("string"),
                    "end_timestamp": datasets.Value("string"),
                    "start_second": datasets.Value("string"),
                    "end_second": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "thumbnail": datasets.Value("string"),
                },
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # download_and_extract also accepts a path relative to the dataset
        # repository, so the JSONL file shipped alongside this script is
        # resolved and cached for us.
        train_path = dl_manager.download_and_extract(TRAIN_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        # Stream the JSONL file line by line so the whole corpus never has to
        # sit in memory at once; each non-empty line is one JSON document.
        with open(filepath, encoding="utf-8") as f:
            id_ = 0
            for line in f:
                # Skip blank lines so a trailing newline does not crash json.loads.
                if not line.strip():
                    continue
                doc = json.loads(line)
                yield id_, {
                    "video_id": doc["video_id"],
                    "title": doc["title"],
                    "text": doc["text"],
                    "start_timestamp": doc["start_timestamp"],
                    "end_timestamp": doc["end_timestamp"],
                    "start_second": doc["start_second"],
                    "end_second": doc["end_second"],
                    "url": doc["url"],
                    "thumbnail": doc["thumbnail"],
                }
                id_ += 1
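

if __name__ == "__main__":
    # Quick smoke test, a sketch only: this assumes yt-transcriptions.jsonl
    # sits next to this script; on the Hub the same loader is published as
    # "ashraq/youtube-transcription" and can be passed to load_dataset instead
    # of the file path.
    ds = datasets.load_dataset(__file__, split="train")
    print(ds[0]["title"], ds[0]["url"])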