Shubham Gupta committed on
Commit
4b6dede
1 Parent(s): 1eecda5

Add initial implementation of lexicap

Browse files
Files changed (1) hide show
  1. lexicap.py +57 -0
lexicap.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+
3
+ """Lexicap: Lex Friedman Podcast Whisper Captions."""
4
+
5
+
6
+ import csv
7
+
8
+ import datasets
9
+
10
+
11
+ _CITATION = """\
12
+ """
13
+
14
+ _DESCRIPTION = """\
15
+ Lexicap contains the captions for every Lex Friedman Podcast episode. It it created by [Dr. Andrej Karpathy](https://twitter.com/karpathy).
16
+ There are 430 caption files available. There are 2 types of files:
17
+ - large
18
+ - small
19
+ Each file name follows the format `episode_{episode_number}_{file_type}.vtt`.
20
+ """
21
+
22
+
23
+
24
class LexicapConfig(datasets.BuilderConfig):
    """BuilderConfig for Lexicap."""

    def __init__(self, **kwargs):
        """Constructs a LexicapConfig.

        Args:
            **kwargs: keyword arguments forwarded to super
                (e.g. ``name``, ``description``).
        """
        # Pin the dataset schema version; everything else passes through
        # to datasets.BuilderConfig. (Fixed: the original line ended with
        # a stray trailing comma, building and discarding a 1-tuple.)
        super().__init__(version=datasets.Version("0.1.0", ""), **kwargs)
33
+
34
+
35
class Lexicap(datasets.GeneratorBasedBuilder):
    """Lexicap dataset: Whisper-generated captions for the Lex Friedman Podcast."""

    # The two caption-file variants described in _DESCRIPTION
    # (`episode_{episode_number}_large.vtt` / `..._small.vtt`).
    # Fixed: this name was referenced below but never defined anywhere
    # in the file, which would raise NameError at import time.
    _DATA_OPTIONS = ("large", "small")

    # One config per file variant. Fixed: the description string literal
    # was unterminated (missing closing quote — a SyntaxError), and carried
    # an `f` prefix despite having no placeholders.
    BUILDER_CONFIGS = [
        LexicapConfig(  # pylint: disable=g-complex-comprehension
            name=config_name,
            description=(
                "A dataset consisting of captions for every Lex Friedman "
                "Podcast episode, generated using OpenAI Whisper. This "
                "dataset is created by "
                "[Dr. Andrej Karpathy](https://twitter.com/karpathy)."
            ),
        )
        for config_name in _DATA_OPTIONS
    ]

    def _info(self):
        """Returns DatasetInfo metadata for this builder."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
        )

    def _split_generators(self, dl_manager):
        """Returns the splits; everything lives in a single TRAIN split.

        There is no predefined train/val/test split for this dataset.
        """
        # NOTE(review): GeneratorBasedBuilder also requires a
        # _generate_examples(file_path) method, which is not implemented
        # in this file yet — loading will fail until it is added.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file_path": "vtt"}),
        ]