|
import urllib.parse |
|
|
|
import datasets |
|
import pandas as pd |
|
import requests |
|
|
|
# BibTeX citation of the paper that introduced the XD-Violence dataset.
_CITATION = """\
@inproceedings{Wu2020not,
title={Not only Look, but also Listen: Learning Multimodal Violence Detection under Weak Supervision},
author={Wu, Peng and Liu, jing and Shi, Yujia and Sun, Yujia and Shao, Fangtao and Wu, Zhaoyang and Yang, Zhiwei},
booktitle={European Conference on Computer Vision (ECCV)},
year={2020}
}
"""

# Shown on the dataset card of the Hub mirror.
_DESCRIPTION = """\
Dataset for the paper "Not only Look, but also Listen: Learning Multimodal Violence Detection under Weak Supervision". \
The dataset is downloaded from the authors' website (https://roc-ng.github.io/XD-Violence/). Hosting this dataset on HuggingFace \
is just to make it easier for my own project to use this dataset. Please cite the original paper if you use this dataset.
"""

# Repository name of the dataset on the Hugging Face Hub.
_NAME = "xd-violence"

_HOMEPAGE = f"https://huggingface.co/datasets/jherng/{_NAME}"

_LICENSE = "MIT"

# Base URL that annotation-file and video-file names are joined onto.
_URL = f"https://huggingface.co/datasets/jherng/{_NAME}/resolve/main/data/"
|
|
|
|
|
class XDViolenceConfig(datasets.BuilderConfig):
    """Configuration for the XD-Violence dataset builder."""

    def __init__(self, **kwargs):
        """Create a config; every keyword argument is forwarded unchanged.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
|
|
|
|
|
class XDViolence(datasets.GeneratorBasedBuilder):
    """Dataset builder for XD-Violence.

    Two configs are declared: "video" (paths to downloaded .mp4 files, the
    default) and "rgb" (pre-extracted I3D RGB features — not implemented yet).
    """

    BUILDER_CONFIGS = [
        XDViolenceConfig(
            name="video",
            description="Video dataset",
        ),
        XDViolenceConfig(
            name="rgb",
            description="RGB visual features of the video dataset",
        ),
    ]

    DEFAULT_CONFIG_NAME = "video"
    BUILDER_CONFIG_CLASS = XDViolenceConfig

    # Label codes embedded in the video ids.  NOTE(review): there is no "B3"
    # code — this matches the original dataset's code list, not an omission.
    CODE2LABEL = {
        "A": "Normal",
        "B1": "Fighting",
        "B2": "Shooting",
        "B4": "Riot",
        "B5": "Abuse",
        "B6": "Car accident",
        "G": "Explosion",
    }

    LABEL2IDX = {
        "Normal": 0,
        "Fighting": 1,
        "Shooting": 2,
        "Riot": 3,
        "Abuse": 4,
        "Car accident": 5,
        "Explosion": 6,
    }

    # Class-name order shared by both configs' feature schemas; must agree
    # with the index values in LABEL2IDX.
    _MULTILABEL_NAMES = [
        "Normal",
        "Fighting",
        "Shooting",
        "Riot",
        "Abuse",
        "Car accident",
        "Explosion",
    ]

    def _info(self):
        """Return DatasetInfo; the two configs differ in exactly one column.

        "rgb" carries an Array3D of I3D features, "video" carries a local
        file path. Everything else (id, targets, frame annotations) is shared,
        so it is built once instead of being duplicated per branch.
        """
        if self.config.name == "rgb":
            # Presumably (num_snippets, 10 crops, 2048 dims) — the snippet
            # axis is variable-length, hence None.  TODO confirm with the
            # feature-extraction pipeline once "rgb" is implemented.
            data_column = {
                "rgb_feats": datasets.Array3D(
                    shape=(None, 10, 2048),
                    dtype="float32",
                )
            }
        else:
            data_column = {"path": datasets.Value("string")}

        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                **data_column,
                "binary_target": datasets.ClassLabel(
                    names=["Non-violence", "Violence"]
                ),
                "multilabel_targets": datasets.Sequence(
                    datasets.ClassLabel(names=self._MULTILABEL_NAMES)
                ),
                # Per-video list of [start, end] frame ranges (test split only;
                # the train list carries no frame-level annotations).
                "frame_annotations": datasets.Sequence(
                    {
                        "start": datasets.Value("int32"),
                        "end": datasets.Value("int32"),
                    }
                ),
            }
        )

        return datasets.DatasetInfo(
            features=features,
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    @staticmethod
    def _build_video_urls(annotation_path, subdir):
        """Derive percent-encoded download URLs for every video listed in an
        annotation file.

        Args:
            annotation_path: local path of a downloaded annotation file whose
                first whitespace-separated column is the video id.
            subdir: remote directory under `_URL` holding the videos
                (e.g. "video" or "video/test_videos").

        Returns:
            list[str]: one URL per annotation row, in file order.
        """
        ids = pd.read_csv(
            annotation_path,
            header=None,
            sep=" ",
            usecols=[0],
            names=["id"],
        )["id"]
        return ids.apply(
            # quote() percent-encodes characters like '#' that appear in the
            # ids; ':' and '/' are kept so the URL structure survives.
            lambda x: urllib.parse.quote(
                urllib.parse.urljoin(_URL, f"{subdir}/{x.split('.mp4')[0]}.mp4"),
                safe=":/",
            )
        ).to_list()

    def _split_generators(self, dl_manager):
        """Download annotations and videos, then declare the train/test splits.

        Raises:
            NotImplementedError: for the "rgb" config.
        """
        if self.config.name == "rgb":
            raise NotImplementedError("rgb not implemented yet")

        annotation_urls = {
            "train": urllib.parse.urljoin(_URL, "train_list.txt"),
            "test": urllib.parse.urljoin(_URL, "test_annotations.txt"),
        }
        annotation_paths = {
            split: dl_manager.download_and_extract(url)
            for split, url in annotation_urls.items()
        }

        video_urls = {
            "train": self._build_video_urls(annotation_paths["train"], "video"),
            "test": self._build_video_urls(
                annotation_paths["test"], "video/test_videos"
            ),
        }
        video_paths = {
            split: dl_manager.download(urls) for split, urls in video_urls.items()
        }

        annotation_readers = {
            "train": self._read_train_list,
            "test": self._read_test_annotations,
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "annotation_path": annotation_paths["train"],
                    "video_paths": video_paths["train"],
                    "annotation_reader": annotation_readers["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "annotation_path": annotation_paths["test"],
                    # BUG FIX: previously passed video_urls["test"] (remote
                    # URLs), silently ignoring the files downloaded above and
                    # diverging from the train split, which uses local paths.
                    "video_paths": video_paths["test"],
                    "annotation_reader": annotation_readers["test"],
                },
            ),
        ]

    def _generate_examples(self, annotation_path, video_paths, annotation_reader):
        """Yield (key, example) pairs for the "video" config.

        Args:
            annotation_path: local path of the split's annotation file.
            video_paths: local video paths, aligned row-for-row with the
                annotation file.
            annotation_reader: callable mapping annotation_path to a list of
                {"id": ..., ["frame_annotations": ...]} records.
        """
        if self.config.name == "rgb":
            raise NotImplementedError("rgb not implemented yet")

        annotations = annotation_reader(annotation_path)

        for key, (path, annotation) in enumerate(zip(video_paths, annotations)):
            video_id = annotation["id"]  # renamed: `id` shadowed the builtin
            binary, multilabel = self.extract_labels(video_id)

            yield (
                key,
                {
                    "id": video_id,
                    "path": path,
                    "binary_target": binary,
                    "multilabel_targets": multilabel,
                    # Train records carry no frame annotations; default to [].
                    "frame_annotations": annotation.get("frame_annotations", []),
                },
            )

    @staticmethod
    def _read_train_list(path):
        """Read train_list.txt and return a list of {"id": ...} records."""
        train_list = pd.read_csv(path, header=None, sep=" ", usecols=[0], names=["id"])
        # Entries look like "<folder>/<video_id>.mp4"; keep only the bare id.
        train_list["id"] = train_list["id"].apply(
            lambda x: x.split("/")[1].split(".mp4")[0]
        )
        return train_list.to_dict("records")

    @staticmethod
    def _read_test_annotations(path):
        """Read test_annotations.txt (local file or http(s) URL).

        Each line is "<id>.mp4 start end [start end ...]".

        Returns:
            list of {"id": str, "frame_annotations": [{"start": int,
            "end": int}, ...]} records, one per line.
        """

        def parse_line(line):
            parts = line.strip().split(" ")
            return {
                "id": parts[0].split(".mp4")[0],
                # Cast to int: the feature schema declares int32, but the raw
                # tokens are strings (previously stored uncast).
                "frame_annotations": [
                    {"start": int(parts[i]), "end": int(parts[i + 1])}
                    for i in range(1, len(parts), 2)
                ],
            }

        scheme = urllib.parse.urlparse(path).scheme
        if scheme in ("http", "https"):
            with requests.get(path, stream=True) as r:
                r.raise_for_status()
                return [parse_line(line.decode("utf-8")) for line in r.iter_lines()]

        with open(path, "r") as f:
            return [parse_line(line) for line in f]

    @classmethod
    def extract_labels(cls, video_id):
        """Extract (binary_target, multilabel_targets) from a video id.

        The id's last "_"-separated token holds "-"-separated label codes;
        presumably normal videos carry the single code "A" while violent ones
        carry several codes padded with "0" — TODO confirm against the
        dataset's file naming.
        """
        codes = video_id.split("_")[-1].split(".mp4")[0].split("-")

        # More than one code marks a violent video.
        binary = 1 if len(codes) > 1 else 0
        # "0" entries are padding, not labels.
        multilabel = [
            cls.LABEL2IDX[cls.CODE2LABEL[code]] for code in codes if code != "0"
        ]

        return binary, multilabel
|
|