Leyo (HF staff) committed
Commit f34431e
Parent: 232069e

delete useless files

Files changed (2)
  1. ActivityNet_Captions +0 -127
  2. captions.tar.gz +0 -3
ActivityNet_Captions DELETED
@@ -1,127 +0,0 @@
- # Lint as: python3
- """ActivityNet Captions: Dense-Captioning Events in Videos."""
-
- import json
-
- import datasets
-
-
- _CITATION = """
- @inproceedings{krishna2017dense,
-   title={Dense-Captioning Events in Videos},
-   author={Krishna, Ranjay and Hata, Kenji and Ren, Frederic and Fei-Fei, Li and Niebles, Juan Carlos},
-   booktitle={International Conference on Computer Vision (ICCV)},
-   year={2017}
- }
- """
-
- _DESCRIPTION = """\
- The ActivityNet Captions dataset connects videos to a series of temporally annotated sentence descriptions.
- Each sentence covers a unique segment of the video, describing multiple events that occur. These events
- may occur over very long or short periods of time and are not limited in any capacity, allowing them to
- co-occur. On average, each of the 20k videos contains 3.65 temporally localized sentences, resulting in
- a total of 100k sentences. We find that the number of sentences per video follows a relatively normal
- distribution. Furthermore, as the video duration increases, the number of sentences also increases.
- Each sentence has an average length of 13.48 words, which is also normally distributed. You can find more
- details of the dataset under the ActivityNet Captions Dataset section, and under supplementary materials
- in the paper.
- """
-
- _URL_BASE = "https://cs.stanford.edu/people/ranjaykrishna/densevid/"
-
- _DL_URL = "https://huggingface.co/datasets/Leyo/ActivityNet_Captions/resolve/main/captions.tar.gz"
-
-
- class ActivityNetConfig(datasets.BuilderConfig):
-     """BuilderConfig for ActivityNet Captions."""
-
-     def __init__(self, **kwargs):
-         super().__init__(version=datasets.Version("2.1.0", ""), **kwargs)
-
-
- class ActivityNet(datasets.GeneratorBasedBuilder):
-     """ActivityNet Captions dataset builder."""
-
-     DEFAULT_CONFIG_NAME = "all"
-     BUILDER_CONFIGS = [
-         ActivityNetConfig(name="all", description="All the ActivityNet Captions dataset"),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "video_id": datasets.Value("string"),
-                     "path": datasets.Value("string"),
-                     "duration": datasets.Value("float32"),
-                     "starts": datasets.features.Sequence(datasets.Value("float32")),
-                     "ends": datasets.features.Sequence(datasets.Value("float32")),
-                     "captions": datasets.features.Sequence(datasets.Value("string")),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_URL_BASE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         archive_path = dl_manager.download(_DL_URL)
-
-         # `iter_archive` yields (relative_path, file_object) pairs from inside
-         # the tarball, so the annotation files are matched by name rather than
-         # joined onto the local download path.
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "files": dl_manager.iter_archive(archive_path),
-                     "ids_file": "train_ids.json",
-                     "infos_file": "train.json",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "files": dl_manager.iter_archive(archive_path),
-                     "ids_file": "val_ids.json",
-                     "infos_file": "val_1.json",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "files": dl_manager.iter_archive(archive_path),
-                     "ids_file": "test_ids.json",
-                     "infos_file": "val_2.json",
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, files, ids_file, infos_file):
-         """Yields one example per video id, with its timestamped captions."""
-         ids, infos = None, None
-         # The archive iterator can only be consumed once, so both annotation
-         # files are collected in a single pass.
-         for path, f in files:
-             if path.endswith(ids_file):
-                 ids = json.load(f)
-             elif path.endswith(infos_file):
-                 infos = json.load(f)
-             if ids is not None and infos is not None:
-                 break
-         for idx, video_id in enumerate(ids):
-             # Ids carry a "v_" prefix; strip it to recover the YouTube url.
-             url = "https://www.youtube.com/watch?v=" + video_id[2:]
-             timestamps = infos[video_id]["timestamps"]
-             yield idx, {
-                 "video_id": video_id,
-                 "path": url,
-                 "duration": infos[video_id]["duration"],
-                 "starts": [t[0] for t in timestamps],
-                 "ends": [t[1] for t in timestamps],
-                 "captions": infos[video_id]["sentences"],
-             }
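For reference, a builder like the one removed above is consumed through datasets.load_dataset. A minimal usage sketch, assuming the script is still available under the Leyo/ActivityNet_Captions repo id (taken from _DL_URL above) and that the caption archive resolves as expected:

import datasets

# Load the train split; the script above streams annotations from the tarball.
dataset = datasets.load_dataset("Leyo/ActivityNet_Captions", split="train")

# Each example pairs a YouTube url with temporally localized captions.
example = dataset[0]
print(example["video_id"], example["path"], example["duration"])
for start, end, caption in zip(example["starts"], example["ends"], example["captions"]):
    print(f"[{start:.1f}s - {end:.1f}s] {caption}")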
captions.tar.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:dc829f081096b8a4cd2271ca54df37d28ba44a56b94330c0dc1a9c1e7f8ba854
- size 2664968
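The deleted captions.tar.gz entry is a Git LFS pointer, not the archive itself: it records the LFS spec version, the sha256 object id of the real blob, and its size in bytes. A minimal sketch of reading those fields, with a hypothetical parse_lfs_pointer helper:

def parse_lfs_pointer(text):
    # Each pointer line is a "key value" pair, e.g. "size 2664968" (bytes).
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "oid": fields["oid"],
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:dc829f081096b8a4cd2271ca54df37d28ba44a56b94330c0dc1a9c1e7f8ba854
size 2664968"""
print(parse_lfs_pointer(pointer))  # the referenced tarball is ~2.7 MB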