import urllib.parse

import datasets
import numpy as np
import pandas as pd
import requests

_CITATION = """\
@inproceedings{Wu2020not,
    title={Not only Look, but also Listen: Learning Multimodal Violence Detection under Weak Supervision},
    author={Wu, Peng and Liu, Jing and Shi, Yujia and Sun, Yujia and Shao, Fangtao and Wu, Zhaoyang and Yang, Zhiwei},
    booktitle={European Conference on Computer Vision (ECCV)},
    year={2020}
}
"""

_DESCRIPTION = """\
Dataset for the paper "Not only Look, but also Listen: Learning Multimodal Violence Detection under Weak Supervision". \
The data is downloaded from the authors' website (https://roc-ng.github.io/XD-Violence/). This dataset is hosted on HuggingFace \
only to make it easier for my own projects to use. Please cite the original paper if you use this dataset.
"""

_NAME = "xd-violence"

_HOMEPAGE = f"https://huggingface.co/datasets/jherng/{_NAME}"

_LICENSE = "MIT"

_URL = f"https://huggingface.co/datasets/jherng/{_NAME}/resolve/main/data/"


class XDViolenceConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        """BuilderConfig for XD-Violence.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(XDViolenceConfig, self).__init__(**kwargs)


class XDViolence(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        XDViolenceConfig(
            name="video",
            description="Video dataset.",
        ),
        XDViolenceConfig(
            name="i3d_rgb",
            description="RGB features of the dataset extracted with pretrained I3D ResNet50 model (Pre-trained on ImageNet-1k; Transfer learning on Kinetics-400 dataset).",
        ),
        XDViolenceConfig(
            name="swin_rgb",
            description="RGB features of the dataset extracted with pretrained Video Swin Transformer model (Pre-trained on ImageNet-1k; Transfer learning on Kinetics-400 dataset).",
        ),
        XDViolenceConfig(
            name="c3d_rgb",
            description="RGB features of the dataset extracted with pretrained C3D model (Pre-trained on Sports-1M; Transfer learning on UCF-101 dataset).",
        ),
    ]

    DEFAULT_CONFIG_NAME = "video"
    BUILDER_CONFIG_CLASS = XDViolenceConfig

    CODE2IDX = {
        "A": 0,  # Normal
        "B1": 1,  # Fighting
        "B2": 2,  # Shooting
        "B4": 3,  # Riot
        "B5": 4,  # Abuse
        "B6": 5,  # Car accident
        "G": 6,  # Explosion
    }
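    # Example: an id suffix such as "label_B2-G-0" maps to multilabel [2, 6]
    # (Shooting, Explosion) once the "0" padding codes are dropped; see
    # _extract_labels below.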

    def _info(self):
        # Feature dimensionality per config: I3D ResNet50 -> 2048, Video Swin -> 768, C3D -> 4096.
        feature_dims = {"i3d_rgb": 2048, "swin_rgb": 768, "c3d_rgb": 4096}

        # Fields shared by every config.
        common_features = {
            "binary_target": datasets.ClassLabel(names=["Non-violence", "Violence"]),
            "multilabel_target": datasets.Sequence(
                datasets.ClassLabel(names=["Normal", "Fighting", "Shooting", "Riot", "Abuse", "Car accident", "Explosion"])
            ),
            "frame_annotations": datasets.Sequence({"start": datasets.Value("int32"), "end": datasets.Value("int32")}),
        }

        if self.config.name in feature_dims:
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "feature": datasets.Array2D(shape=(None, feature_dims[self.config.name]), dtype="float32"),  # (num_frames, feature_dim)
                    **common_features,
                }
            )
        else:  # default = "video"
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    **common_features,
                }
            )

        return datasets.DatasetInfo(features=features, description=_DESCRIPTION, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION)

    def _split_generators(self, dl_manager):
        # Download train list
        # (note: str.rstrip(".mp4") would strip any trailing ".", "m", "p", "4" characters, so remove the suffix explicitly)
        train_list_path = dl_manager.download_and_extract(urllib.parse.urljoin(_URL, "train_list.txt"))
        train_list = pd.read_csv(train_list_path, header=None, sep=" ", usecols=[0], names=["id"])["id"].apply(lambda x: x[:-4] if x.endswith(".mp4") else x).tolist()
        train_ids = [x.split("/")[1] for x in train_list]  # remove subfolder prefix, e.g., "1-1004"

        # Download test list
        test_list_path = dl_manager.download_and_extract(urllib.parse.urljoin(_URL, "test_list.txt"))
        test_list = pd.read_csv(test_list_path, header=None, sep=" ", usecols=[0], names=["id"])["id"].apply(lambda x: x[:-4] if x.endswith(".mp4") else x).tolist()
        test_ids = [x.split("/")[1] for x in test_list]

        # Download test annotation file
        test_annotations_path = dl_manager.download_and_extract(urllib.parse.urljoin(_URL, "test_annotations.txt"))

        # Each config maps to a subdirectory in the repo: feature configs store .npy files, the video config stores .mp4 files.
        if self.config.name in ("i3d_rgb", "swin_rgb", "c3d_rgb"):
            subdir, ext = self.config.name, "npy"
        else:
            subdir, ext = "video", "mp4"

        train_paths = dl_manager.download([urllib.parse.quote(urllib.parse.urljoin(_URL, f"{subdir}/{x}.{ext}"), safe=":/") for x in train_list])
        test_paths = dl_manager.download([urllib.parse.quote(urllib.parse.urljoin(_URL, f"{subdir}/{x}.{ext}"), safe=":/") for x in test_list])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"ids": train_ids, "paths": train_paths, "annotations_path": None},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"ids": test_ids, "paths": test_paths, "annotations_path": test_annotations_path},
            ),
        ]

    def _generate_examples(self, ids, paths, annotations_path):
        frame_annots_mapper = self._read_frame_annotations(annotations_path) if annotations_path else dict()
        labels = [self._extract_labels(f_id) for f_id in ids]  # Extract labels

        if self.config.name in ("i3d_rgb", "swin_rgb", "c3d_rgb"):
            for key, (f_id, f_path, f_label) in enumerate(zip(ids, paths, labels)):
                binary, multilabel = f_label
                frame_annotations = frame_annots_mapper.get(f_id, [])
                feature = np.load(f_path)

                for crop_idx in range(feature.shape[1]):  # Loop over crops (5 crops)
                    yield (
                        f"{key}-{crop_idx}",
                        {
                            "id": f"{f_id}__{crop_idx}",
                            "feature": np.squeeze(feature[:, crop_idx, :]).reshape((-1, feature.shape[-1])),
                            "binary_target": binary,
                            "multilabel_target": multilabel,
                            "frame_annotations": frame_annotations,
                        },
                    )
        else:
            for key, (f_id, f_path, f_label) in enumerate(zip(ids, paths, labels)):
                binary, multilabel = f_label
                frame_annotations = frame_annots_mapper.get(f_id, [])

                yield (
                    key,
                    {
                        "id": f_id,
                        "path": f_path,
                        "binary_target": binary,
                        "multilabel_target": multilabel,
                        "frame_annotations": frame_annotations,
                    },
                )

    def _read_frame_annotations(self, path):
        """Reads the test annotation file and maps each video id to its list of violent frame ranges."""
        mapper = {}
        is_url = urllib.parse.urlparse(path).scheme in ("http", "https")

        def parse_line(line):
            parts = line.strip().split(" ")
            if not parts or not parts[0]:  # skip blank lines
                return
            f_id = parts[0][:-4] if parts[0].endswith(".mp4") else parts[0]  # drop the ".mp4" suffix only
            # The remaining fields come in (start, end) frame-number pairs; cast to int to match the int32 feature schema.
            mapper[f_id] = [{"start": int(parts[i]), "end": int(parts[i + 1])} for i in range(1, len(parts), 2)]

        if is_url:
            with requests.get(path, stream=True) as r:
                r.raise_for_status()
                for line in r.iter_lines():
                    parse_line(line.decode("utf-8"))
        else:
            with open(path, "r") as f:
                for line in f:
                    parse_line(line)

        return mapper

    def _extract_labels(self, f_id):
        """Extracts labels from a given file id."""
        codes = f_id.split("_")[-1].split("-")
        binary = 1 if len(codes) > 1 else 0
        multilabel = [self.CODE2IDX[code] for code in codes if code != "0"]
        return binary, multilabel
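

# Minimal usage sketch (assumes this script is hosted as the loading script of the
# `jherng/xd-violence` dataset repo and that running remote code is allowed):
#
#   import datasets
#
#   # Clip-level I3D RGB features; use name="video" (the default) for raw .mp4 paths.
#   ds = datasets.load_dataset("jherng/xd-violence", name="i3d_rgb", split="test", trust_remote_code=True)
#   print(ds[0]["id"], ds[0]["binary_target"], ds[0]["frame_annotations"])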