parquet-converter
committed on
Commit
•
3399d44
1
Parent(s):
35b98c1
Update parquet files
Browse files
.gitattributes
DELETED
@@ -1,38 +0,0 @@
|
|
1 |
-
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
-
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
6 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
7 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
8 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
9 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
10 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
11 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
12 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
13 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
14 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
15 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
16 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
17 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
18 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
19 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
20 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
21 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
22 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
23 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
24 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
25 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
26 |
-
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
27 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
28 |
-
# Audio files - uncompressed
|
29 |
-
*.pcm filter=lfs diff=lfs merge=lfs -text
|
30 |
-
*.sam filter=lfs diff=lfs merge=lfs -text
|
31 |
-
*.raw filter=lfs diff=lfs merge=lfs -text
|
32 |
-
# Audio files - compressed
|
33 |
-
*.aac filter=lfs diff=lfs merge=lfs -text
|
34 |
-
*.flac filter=lfs diff=lfs merge=lfs -text
|
35 |
-
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
36 |
-
*.ogg filter=lfs diff=lfs merge=lfs -text
|
37 |
-
*.wav filter=lfs diff=lfs merge=lfs -text
|
38 |
-
*.jsonl filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
yt-transcriptions.jsonl → default/youtube-transcription-train.parquet
RENAMED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:863f10e91b4e5ac4a9415fd07464262e03da7a848d034713c9b55ff5e7f04f51
|
3 |
+
size 2549797
|
youtube-transcription.py
DELETED
@@ -1,79 +0,0 @@
|
|
1 |
-
import datasets
|
2 |
-
import json
|
3 |
-
|
4 |
-
# BibTeX citation surfaced on the dataset card.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2021}
}
"""

# One-line summary of what this dataset contains and what it is for.
_DESCRIPTION = """\
This is YouTube video transcription dataset built from YTTTS Speech Collection for semantic search.
"""
_HOMEPAGE = "https://huggingface.co/datasets/ashraq/youtube-transcription"

# No license was declared for the source collection.
_LICENSE = ""

# Repository-relative path of the JSON-lines file backing the train split.
TRAIN_URL = "yt-transcriptions.jsonl"
21 |
-
|
22 |
-
|
23 |
-
class YoutubeTranscription(datasets.GeneratorBasedBuilder):
    """Builder for the YTTTS YouTube transcription corpus.

    Reads transcript segments from a JSON-lines file and exposes them as a
    single ``train`` split in which every field is a plain string.
    """

    VERSION = datasets.Version("1.2.0")

    # Single source of truth for the record schema: these JSONL keys are
    # copied verbatim into each example, in this order.  (The original code
    # repeated the same nine keys in both _info and _generate_examples.)
    _FIELDS = (
        "video_id",
        "title",
        "text",
        "start_timestamp",
        "end_timestamp",
        "start_second",
        "end_second",
        "url",
        "thumbnail",
    )

    def _info(self):
        """Return dataset metadata: features, homepage, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {name: datasets.Value("string") for name in self._FIELDS}
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Resolve the JSONL source file and declare the single train split."""
        train_path = dl_manager.download_and_extract(TRAIN_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(id, example)`` pairs, streaming the file line by line.

        Streaming replaces the original ``readlines()`` + list-comprehension
        approach, which loaded and parsed the entire corpus into memory
        before yielding the first example.  ``enumerate`` replaces the
        hand-rolled ``id_`` counter.
        """
        with open(filepath, encoding="utf8") as f:
            for id_, line in enumerate(f):
                doc = json.loads(line)
                # Direct indexing (not .get) matches the original behavior:
                # a record missing an expected key raises KeyError.
                yield id_, {name: doc[name] for name in self._FIELDS}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|