parquet-converter committed on
Commit e912a6e
1 Parent(s): 780b46b

Update parquet files

.gitattributes DELETED
@@ -1,37 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ftz filter=lfs diff=lfs merge=lfs -text
6
- *.gz filter=lfs diff=lfs merge=lfs -text
7
- *.h5 filter=lfs diff=lfs merge=lfs -text
8
- *.joblib filter=lfs diff=lfs merge=lfs -text
9
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
- *.model filter=lfs diff=lfs merge=lfs -text
11
- *.msgpack filter=lfs diff=lfs merge=lfs -text
12
- *.onnx filter=lfs diff=lfs merge=lfs -text
13
- *.ot filter=lfs diff=lfs merge=lfs -text
14
- *.parquet filter=lfs diff=lfs merge=lfs -text
15
- *.pb filter=lfs diff=lfs merge=lfs -text
16
- *.pt filter=lfs diff=lfs merge=lfs -text
17
- *.pth filter=lfs diff=lfs merge=lfs -text
18
- *.rar filter=lfs diff=lfs merge=lfs -text
19
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
20
- *.tar.* filter=lfs diff=lfs merge=lfs -text
21
- *.tflite filter=lfs diff=lfs merge=lfs -text
22
- *.tgz filter=lfs diff=lfs merge=lfs -text
23
- *.wasm filter=lfs diff=lfs merge=lfs -text
24
- *.xz filter=lfs diff=lfs merge=lfs -text
25
- *.zip filter=lfs diff=lfs merge=lfs -text
26
- *.zstandard filter=lfs diff=lfs merge=lfs -text
27
- *tfevents* filter=lfs diff=lfs merge=lfs -text
28
- # Audio files - uncompressed
29
- *.pcm filter=lfs diff=lfs merge=lfs -text
30
- *.sam filter=lfs diff=lfs merge=lfs -text
31
- *.raw filter=lfs diff=lfs merge=lfs -text
32
- # Audio files - compressed
33
- *.aac filter=lfs diff=lfs merge=lfs -text
34
- *.flac filter=lfs diff=lfs merge=lfs -text
35
- *.mp3 filter=lfs diff=lfs merge=lfs -text
36
- *.ogg filter=lfs diff=lfs merge=lfs -text
37
- *.wav filter=lfs diff=lfs merge=lfs -text
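For reference, each deleted rule above routes files matching its glob pattern through Git LFS. Below is a minimal illustrative sketch (not part of this commit) of how such patterns classify filenames, using Python's `fnmatch`; note that real gitattributes matching has extra path semantics that this simplification ignores.

```python
from fnmatch import fnmatch

# A few of the glob patterns from the deleted .gitattributes above.
LFS_PATTERNS = ["*.parquet", "*.bin", "*.tar.*", "*tfevents*"]

def routed_through_lfs(filename: str) -> bool:
    """Return True if any pattern would send this file through Git LFS."""
    # Simplification: gitattributes matches patterns without a slash against
    # the basename; fnmatch's "*" also matches "/", which is close enough here.
    return any(fnmatch(filename, pattern) for pattern in LFS_PATTERNS)

print(routed_through_lfs("all/activity_net_captions-train.parquet"))  # True
print(routed_through_lfs("README.md"))                                # False
```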
ActivityNet_Captions.py DELETED
@@ -1,115 +0,0 @@
1
- # Lint as: python3
2
- """TGIF: A New Dataset and Benchmark on Animated GIF Description"""
3
-
4
- import os
5
- import json
6
- import datasets
7
-
8
- _CITATION = """
9
- @inproceedings{krishna2017dense,
10
- title={Dense-Captioning Events in Videos},
11
- author={Krishna, Ranjay and Hata, Kenji and Ren, Frederic and Fei-Fei, Li and Niebles, Juan Carlos},
12
- booktitle={International Conference on Computer Vision (ICCV)},
13
- year={2017}
14
- }
15
- """
16
-
17
- _DESCRIPTION = """\
18
- The ActivityNet Captions dataset connects videos to a series of temporally annotated sentence descriptions.
19
- Each sentence covers an unique segment of the video, describing multiple events that occur. These events
20
- may occur over very long or short periods of time and are not limited in any capacity, allowing them to
21
- co-occur. On average, each of the 20k videos contains 3.65 temporally localized sentences, resulting in
22
- a total of 100k sentences. We find that the number of sentences per video follows a relatively normal
23
- distribution. Furthermore, as the video duration increases, the number of sentences also increases.
24
- Each sentence has an average length of 13.48 words, which is also normally distributed. You can find more
25
- details of the dataset under the ActivityNet Captions Dataset section, and under supplementary materials
26
- in the paper.
27
- """
28
-
29
- _URL_BASE = "https://cs.stanford.edu/people/ranjaykrishna/densevid/"
30
-
31
-
32
- class ActivityNetConfig(datasets.BuilderConfig):
33
- """BuilderConfig for ActivityNet Captions."""
34
-
35
- def __init__(self, **kwargs):
36
- super(ActivityNetConfig, self).__init__(
37
- version=datasets.Version("2.1.0", ""), **kwargs)
38
-
39
-
40
- class ActivityNet(datasets.GeneratorBasedBuilder):
41
-
42
- DEFAULT_CONFIG_NAME = "all"
43
- BUILDER_CONFIGS = [
44
- ActivityNetConfig(
45
- name="all", description="All the ActivityNet Captions dataset"),
46
- ]
47
-
48
- def _info(self):
49
- return datasets.DatasetInfo(
50
- description=_DESCRIPTION,
51
- features=datasets.Features(
52
- {
53
- "video_id": datasets.Value("string"),
54
- "video_path": datasets.Value("string"),
55
- "duration": datasets.Value("float32"),
56
- "captions_starts": datasets.features.Sequence(datasets.Value("float32")),
57
- "captions_ends": datasets.features.Sequence(datasets.Value("float32")),
58
- "en_captions": datasets.features.Sequence(datasets.Value("string"))
59
- }
60
- ),
61
- supervised_keys=None,
62
- homepage=_URL_BASE,
63
- citation=_CITATION,
64
- )
65
-
66
- def _split_generators(self, dl_manager):
67
- archive_path = dl_manager.download_and_extract(
68
- _URL_BASE + "captions.zip")
69
-
70
- train_splits = [
71
- datasets.SplitGenerator(
72
- name=datasets.Split.TRAIN,
73
- gen_kwargs={
74
- "infos_file": os.path.join(archive_path, "train.json")
75
- },
76
- )
77
- ]
78
- dev_splits = [
79
- datasets.SplitGenerator(
80
- name=datasets.Split.VALIDATION,
81
- gen_kwargs={
82
- "infos_file": os.path.join(archive_path, "val_1.json")
83
- },
84
- )
85
- ]
86
- test_splits = [
87
- datasets.SplitGenerator(
88
- name=datasets.Split.TEST,
89
- gen_kwargs={
90
- "infos_file": os.path.join(archive_path, "val_2.json")
91
- },
92
- )
93
- ]
94
- return train_splits + dev_splits + test_splits
95
-
96
- def _generate_examples(self, infos_file):
97
- """This function returns the examples."""
98
-
99
- with open(infos_file, encoding="utf-8") as json_file:
100
- infos = json.load(json_file)
101
- for idx, id in enumerate(infos):
102
- path = "https://www.youtube.com/watch?v=" + id[2:]
103
- starts = [float(timestamp[0])
104
- for timestamp in infos[id]["timestamps"]]
105
- ends = [float(timestamp[1])
106
- for timestamp in infos[id]["timestamps"]]
107
- captions = [str(caption) for caption in infos[id]["sentences"]]
108
- yield idx, {
109
- "video_id": id,
110
- "video_path": path,
111
- "duration": float(infos[id]["duration"]),
112
- "captions_starts": starts,
113
- "captions_ends": ends,
114
- "en_captions": captions,
115
- }
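Since this commit removes the loading script above, the dataset is served from the parquet files added below. A minimal sketch of loading them directly (the file paths are taken from this commit's listing; it assumes the parquet files are available locally, e.g. after cloning with `git lfs pull`):

```python
from datasets import load_dataset

# Paths from this commit; adjust if the repo is cloned elsewhere.
data_files = {
    "train": "all/activity_net_captions-train.parquet",
    "validation": "all/activity_net_captions-validation.parquet",
    "test": "all/activity_net_captions-test.parquet",
}
dataset = load_dataset("parquet", data_files=data_files)

example = dataset["train"][0]
print(example["video_id"], example["duration"])
print(example["en_captions"][:2])  # first two captions for this video
```

Loading via the generic `parquet` builder reproduces the features the deleted script declared in `_info()`, since the parquet files were converted from that builder's output.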
README.md DELETED
@@ -1,97 +0,0 @@
1
- ---
2
- annotations_creators:
3
- - expert-generated
4
- language_creators:
5
- - crowdsourced
6
- language:
7
- - en
8
- license:
9
- - other
10
- multilinguality:
11
- - monolingual
12
- pretty_name: ActivityNet Captions
13
- size_categories:
14
- - 10k<n<100K
15
- source_datasets:
16
- - original
17
- task_categories:
18
- - video-captionning
19
- task_ids:
20
- - closed-domain-qa
21
- ---
22
-
23
-
24
- # Dataset Card for ActivityNet Captions
25
- ## Table of Contents
26
- - [Table of Contents](#table-of-contents)
27
- - [Dataset Description](#dataset-description)
28
- - [Dataset Summary](#dataset-summary)
29
- - [Languages](#languages)
30
- - [Dataset Structure](#dataset-structure)
31
- - [Data Fields](#data-fields)
32
- - [Data Splits](#data-splits)
33
- - [Dataset Creation](#dataset-creation)
34
-
35
- - [Personal and Sensitive Information](#personal-and-sensitive-information)
36
- - [Considerations for Using the Data](#considerations-for-using-the-data)
37
- - [Social Impact of Dataset](#social-impact-of-dataset)
38
- - [Discussion of Biases](#discussion-of-biases)
39
- - [Other Known Limitations](#other-known-limitations)
40
- - [Additional Information](#additional-information)
41
- - [Licensing Information](#licensing-information)
42
- - [Citation Information](#citation-information)
43
- - [Contributions](#contributions)
44
- ## Dataset Description
45
- - **Homepage:** https://cs.stanford.edu/people/ranjaykrishna/densevid/
46
- - **Paper:** https://arxiv.org/abs/1705.00754
47
-
48
- ### Dataset Summary
49
- The ActivityNet Captions dataset connects videos to a series of temporally annotated sentence descriptions. Each sentence covers an unique segment of the video, describing multiple events that occur. These events may occur over very long or short periods of time and are not limited in any capacity, allowing them to co-occur. On average, each of the 20k videos contains 3.65 temporally localized sentences, resulting in a total of 100k sentences. We find that the number of sentences per video follows a relatively normal distribution. Furthermore, as the video duration increases, the number of sentences also increases. Each sentence has an average length of 13.48 words, which is also normally distributed. You can find more details of the dataset under the ActivityNet Captions Dataset section, and under supplementary materials in the paper.
50
- ### Languages
51
- The captions in the dataset are in English.
52
- ## Dataset Structure
53
- ### Data Fields
54
- - `video_id` : `str` unique identifier for the video
55
- - `video_path`: `str` Path to the video file
56
- -`duration`: `float32` Duration of the video
57
- - `captions_starts`: `List_float32` List of timestamps denoting the time at which each caption starts
58
- - `captions_ends`: `List_float32` List of timestamps denoting the time at which each caption ends
59
- - `en_captions`: `list_str` List of english captions describing parts of the video
60
-
61
- ### Data Splits
62
- | |train |validation| test | Overall |
63
- |-------------|------:|---------:|------:|------:|
64
- |# of videos|10,009 |4,917 |4,885 |19,811 |
65
- ### Annotations
66
- Quoting [ActivityNet Captions' paper](https://arxiv.org/abs/1705.00754): \
67
- "Each annotation task was divided into two steps: (1)
68
- Writing a paragraph describing all major events happening
69
- in the videos in a paragraph, with each sentence of the paragraph describing one event, and (2) Labeling the
70
- start and end time in the video in which each sentence in the
71
- paragraph event occurred."
72
- ### Who annotated the dataset?
73
- Amazon Mechnical Turk annotators
74
- ### Personal and Sensitive Information
75
- Nothing specifically mentioned in the paper.
76
- ## Considerations for Using the Data
77
- ### Social Impact of Dataset
78
- [More Information Needed]
79
- ### Discussion of Biases
80
- [More Information Needed]
81
- ### Other Known Limitations
82
- [More Information Needed]
83
- ## Additional Information
84
- ### Licensing Information
85
- [More Information Needed]
86
- ### Citation Information
87
- ```bibtex
88
- @InProceedings{tgif-cvpr2016,
89
- @inproceedings{krishna2017dense,
90
- title={Dense-Captioning Events in Videos},
91
- author={Krishna, Ranjay and Hata, Kenji and Ren, Frederic and Fei-Fei, Li and Niebles, Juan Carlos},
92
- booktitle={International Conference on Computer Vision (ICCV)},
93
- year={2017}
94
- }
95
- ```
96
- ### Contributions
97
- Thanks to [@leot13](https://github.com/leot13) for adding this dataset.
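The three caption fields in the deleted card's Data Fields section are parallel lists with one entry per annotated event. A minimal sketch of walking them together (field names from the schema above; the local parquet path comes from this commit, and `pandas` with `pyarrow` is an assumed environment, not something the commit prescribes):

```python
import pandas as pd  # requires pyarrow (or fastparquet) for read_parquet

df = pd.read_parquet("all/activity_net_captions-validation.parquet")
row = df.iloc[0]

# captions_starts, captions_ends, and en_captions are parallel lists:
# the i-th caption describes the segment [starts[i], ends[i]] in seconds.
for start, end, caption in zip(
    row["captions_starts"], row["captions_ends"], row["en_captions"]
):
    print(f"[{start:7.2f}s -> {end:7.2f}s] {caption}")
```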
all/activity_net_captions-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3dd10bd12864a3d8902b17591e3a9a0e25dc3abbcc348beb62efe4cbb51bd58
+ size 971393
all/activity_net_captions-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf9d9e63c457382fbf29badb7b03d77f402d04f14166ad0bbd5235175cdf2a4c
+ size 2282807
all/activity_net_captions-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e59c1e5235418248579a8275413f424e49e9b18a1c3ef1607751887c9216b72
+ size 1072766
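These three added files are Git LFS pointers rather than the parquet data itself: each records the SHA-256 digest (`oid`) and byte length (`size`) of the real file. A minimal sketch (illustrative, not part of the commit) of verifying a fetched file against its pointer, with the expected values copied from the test-split pointer above:

```python
import hashlib
import os

# Assumes the real parquet file has been fetched (e.g. via `git lfs pull`).
path = "all/activity_net_captions-test.parquet"
expected_oid = "e3dd10bd12864a3d8902b17591e3a9a0e25dc3abbcc348beb62efe4cbb51bd58"
expected_size = 971393

# The pointer's `size` is the byte length of the actual file...
assert os.path.getsize(path) == expected_size, "size mismatch"

# ...and `oid sha256:<hex>` is its SHA-256 digest, computed here in 1 MiB chunks.
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)
assert sha256.hexdigest() == expected_oid, "hash mismatch"
print("pointer verified")
```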