Languages:
English
Multilinguality:
monolingual
Size Categories:
10K<n<100K
Language Creators:
crowdsourced
Annotations Creators:
expert-generated
Source Datasets:
original
# Lint as: python3
"""ActivityNet Captions: A Dataset for Dense-Captioning Events in Videos"""

import os
import json
import datasets

_CITATION = """
@inproceedings{krishna2017dense,
    title={Dense-Captioning Events in Videos},
    author={Krishna, Ranjay and Hata, Kenji and Ren, Frederic and Fei-Fei, Li and Niebles, Juan Carlos},
    booktitle={International Conference on Computer Vision (ICCV)},
    year={2017}
}
"""

_DESCRIPTION = """\
The ActivityNet Captions dataset connects videos to a series of temporally annotated sentence descriptions.
Each sentence covers a unique segment of the video, describing multiple events that occur. These events
may occur over very long or short periods of time and are not limited in any capacity, allowing them to 
co-occur. On average, each of the 20k videos contains 3.65 temporally localized sentences, resulting in
a total of 100k sentences. We find that the number of sentences per video follows a relatively normal
distribution. Furthermore, as the video duration increases, the number of sentences also increases. 
Each sentence has an average length of 13.48 words, which is also normally distributed. You can find more
details of the dataset under the ActivityNet Captions Dataset section, and under supplementary materials 
in the paper.
"""

_URL_BASE = "https://cs.stanford.edu/people/ranjaykrishna/densevid/"


class ActivityNetConfig(datasets.BuilderConfig):
    """BuilderConfig for ActivityNet Captions."""

    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("2.1.0", ""), **kwargs)


class ActivityNet(datasets.GeneratorBasedBuilder):
    """Builder for the ActivityNet Captions dataset."""

    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        ActivityNetConfig(
            name="all", description="All the ActivityNet Captions dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "video_id": datasets.Value("string"),
                    "video_path": datasets.Value("string"),
                    "duration": datasets.Value("float32"),
                    "captions_starts": datasets.features.Sequence(datasets.Value("float32")),
                    "captions_ends": datasets.features.Sequence(datasets.Value("float32")),
                    "en_captions": datasets.features.Sequence(datasets.Value("string"))
                }
            ),
            supervised_keys=None,
            homepage=_URL_BASE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        archive_path = dl_manager.download_and_extract(
            _URL_BASE + "captions.zip")

        # captions.zip provides train.json plus two validation files; this script
        # treats val_1.json as the validation split and val_2.json as the test split.
        train_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "infos_file": os.path.join(archive_path, "train.json")
                },
            )
        ]
        dev_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "infos_file": os.path.join(archive_path, "val_1.json")
                },
            )
        ]
        test_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "infos_file": os.path.join(archive_path, "val_2.json")
                },
            )
        ]
        return train_splits + dev_splits + test_splits

    def _generate_examples(self, infos_file):
        """Yields (key, example) tuples read from a captions JSON file."""

        with open(infos_file, encoding="utf-8") as json_file:
            infos = json.load(json_file)
            for idx, video_id in enumerate(infos):
                # Ids are prefixed with "v_"; the rest is the YouTube video id.
                path = "https://www.youtube.com/watch?v=" + video_id[2:]
                starts = [float(timestamp[0])
                          for timestamp in infos[video_id]["timestamps"]]
                ends = [float(timestamp[1])
                        for timestamp in infos[video_id]["timestamps"]]
                captions = [str(caption)
                            for caption in infos[video_id]["sentences"]]
                yield idx, {
                    "video_id": video_id,
                    "video_path": path,
                    "duration": float(infos[video_id]["duration"]),
                    "captions_starts": starts,
                    "captions_ends": ends,
                    "en_captions": captions,
                }
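
# Usage sketch: a minimal example of loading this dataset with the `datasets`
# library, assuming this script is saved locally (the file name below is
# illustrative, not part of the dataset):
#
#   import datasets
#
#   ds = datasets.load_dataset("./ActivityNet_Captions.py", "all")
#   example = ds["train"][0]
#   for start, end, caption in zip(example["captions_starts"],
#                                  example["captions_ends"],
#                                  example["en_captions"]):
#       print(f"[{start:7.2f}s - {end:7.2f}s] {caption}")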