# youtube-transcription/youtube-transcription.py
import json

import datasets

_CITATION = """\
@InProceedings{huggingface:dataset,
    title = {A great new dataset},
    author = {huggingface, Inc.},
    year = {2021}
}
"""

_DESCRIPTION = """\
This is a YouTube video transcription dataset built from the YTTTS Speech Collection for semantic search.
"""

_HOMEPAGE = "https://huggingface.co/datasets/ashraq/youtube-transcription"

_LICENSE = ""

# JSONL file hosted in this dataset repository; one transcription segment per line.
TRAIN_URL = "yt-transcriptions.jsonl"
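
# Illustrative record layout (the values below are placeholders, not real data;
# only the key names are taken from the features declared in _info()):
# {"video_id": "...", "title": "...", "text": "...",
#  "start_timestamp": "...", "end_timestamp": "...",
#  "start_second": "...", "end_second": "...",
#  "url": "...", "thumbnail": "..."}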


class YoutubeTranscription(datasets.GeneratorBasedBuilder):
    """Dataset loading script for ashraq/youtube-transcription."""

    VERSION = datasets.Version("1.2.0")

    def _info(self):
        # All fields, including the second offsets, are stored as strings in the
        # source JSONL, so they are declared as string features here.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "video_id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "start_timestamp": datasets.Value("string"),
                    "end_timestamp": datasets.Value("string"),
                    "start_second": datasets.Value("string"),
                    "end_second": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "thumbnail": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The JSONL file lives in the dataset repository itself, so
        # download_and_extract resolves the relative path to a local cached copy.
        train_path = dl_manager.download_and_extract(TRAIN_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        # Stream the JSONL file line by line instead of reading it all into
        # memory; each line is one transcription segment, keyed by its index.
        with open(filepath, encoding="utf-8") as f:
            for id_, line in enumerate(f):
                doc = json.loads(line)
                yield id_, {
                    "video_id": doc["video_id"],
                    "title": doc["title"],
                    "text": doc["text"],
                    "start_timestamp": doc["start_timestamp"],
                    "end_timestamp": doc["end_timestamp"],
                    "start_second": doc["start_second"],
                    "end_second": doc["end_second"],
                    "url": doc["url"],
                    "thumbnail": doc["thumbnail"],
                }
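
# Minimal usage sketch (not part of this script; it assumes the `datasets`
# library is installed and the repo id from _HOMEPAGE is reachable):
#
#   from datasets import load_dataset
#   ds = load_dataset("ashraq/youtube-transcription", split="train")
#   print(ds[0]["title"], ds[0]["url"])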