# xd-violence.py
import urllib.parse
import datasets
import pandas as pd
import requests
_CITATION = """\
@inproceedings{Wu2020not,
title={Not only Look, but also Listen: Learning Multimodal Violence Detection under Weak Supervision},
author={Wu, Peng and Liu, Jing and Shi, Yujia and Sun, Yujia and Shao, Fangtao and Wu, Zhaoyang and Yang, Zhiwei},
booktitle={European Conference on Computer Vision (ECCV)},
year={2020}
}
"""
_DESCRIPTION = """\
Dataset for the paper "Not only Look, but also Listen: Learning Multimodal Violence Detection under Weak Supervision". \
The data are obtained from the authors' website (https://roc-ng.github.io/XD-Violence/). This dataset is hosted on \
Hugging Face only to make it easier to use in my own project. Please cite the original paper if you use this dataset.
"""
_NAME = "xd-violence"
_HOMEPAGE = f"https://huggingface.co/datasets/jherng/{_NAME}"
_LICENSE = "MIT"
_URL = f"https://huggingface.co/datasets/jherng/{_NAME}/resolve/main/data/"
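# Example usage (a minimal sketch; assumes this script is loaded from the
# "jherng/xd-violence" repo referenced above, and that your installed version
# of `datasets` permits script-based datasets, e.g. via trust_remote_code=True):
#
#     import datasets
#
#     ds = datasets.load_dataset("jherng/xd-violence", name="video", split="test")
#     print(ds[0]["id"], ds[0]["binary_target"], ds[0]["multilabel_target"])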
class XDViolenceConfig(datasets.BuilderConfig):
def __init__(self, **kwargs):
"""BuilderConfig for XD-Violence.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(XDViolenceConfig, self).__init__(**kwargs)
class XDViolence(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
XDViolenceConfig(
name="video",
description="Video dataset",
),
XDViolenceConfig(
name="rgb",
description="RGB visual features of the video dataset",
),
]
DEFAULT_CONFIG_NAME = "video"
BUILDER_CONFIG_CLASS = XDViolenceConfig
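    # Label codes are embedded at the end of each video filename (e.g. a name
    # ending in "_label_A" marks a normal clip, while something like
    # "_label_B1-G-0" carries violence codes joined by "-"); these illustrative
    # patterns reflect the parsing done in _extract_labels below.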
CODE2LABEL = {
"A": "Normal",
"B1": "Fighting",
"B2": "Shooting",
"B4": "Riot",
"B5": "Abuse",
"B6": "Car accident",
"G": "Explosion",
}
LABEL2IDX = {
"Normal": 0,
"Fighting": 1,
"Shooting": 2,
"Riot": 3,
"Abuse": 4,
"Car accident": 5,
"Explosion": 6,
}
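    # These indices follow the order of the names passed to the
    # "multilabel_target" ClassLabel feature in _info(), so the ids produced by
    # _extract_labels line up with the declared schema.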
def _info(self):
if self.config.name == "rgb":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "rgb_feats": datasets.Array3D(
                        # (num_frames, num_crops, feature_dim); 5 crops per frame are used by default for now
                        shape=(None, 5, 2048),
                        dtype="float32",
                    ),
"binary_target": datasets.ClassLabel(
names=["Non-violence", "Violence"]
),
"multilabel_target": datasets.Sequence(
datasets.ClassLabel(
names=[
"Normal",
"Fighting",
"Shooting",
"Riot",
"Abuse",
"Car accident",
"Explosion",
]
)
),
"frame_annotations": datasets.Sequence(
{
"start": datasets.Value("int32"),
"end": datasets.Value("int32"),
}
),
}
)
else: # default = "video"
features = datasets.Features(
{
"id": datasets.Value("string"),
"path": datasets.Value("string"),
"binary_target": datasets.ClassLabel(
names=["Non-violence", "Violence"]
),
"multilabel_target": datasets.Sequence(
datasets.ClassLabel(
names=[
"Normal",
"Fighting",
"Shooting",
"Riot",
"Abuse",
"Car accident",
"Explosion",
]
)
),
"frame_annotations": datasets.Sequence(
{
"start": datasets.Value("int32"),
"end": datasets.Value("int32"),
}
),
}
)
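        # Note: a datasets.Sequence over a dict of sub-features is read back as
        # a dict of lists (e.g. {"start": [...], "end": [...]}) rather than a
        # list of dicts; this applies to the "frame_annotations" column above.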
return datasets.DatasetInfo(
features=features,
description=_DESCRIPTION,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
if self.config.name == "rgb":
raise NotImplementedError("rgb not implemented yet")
else:
# Download train and test list files
list_paths = {
"train": dl_manager.download_and_extract(
urllib.parse.urljoin(_URL, "train_list.txt")
),
"test": dl_manager.download_and_extract(
urllib.parse.urljoin(_URL, "test_list.txt")
),
}
# Download test annotation file
annotation_path = dl_manager.download_and_extract(
urllib.parse.urljoin(_URL, "test_annotations.txt")
)
# Download videos
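            # Build one download URL per listed video; urllib.parse.quote with
            # safe=":/" percent-encodes special characters in the filenames
            # (e.g. "#") while leaving the URL scheme and "/" separators intact.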
            video_urls = {
                split: pd.read_csv(
                    list_paths[split],
                    header=None,
                    sep=" ",
                    usecols=[0],
                    names=["id"],
                )["id"]
                .apply(
                    lambda x: urllib.parse.quote(
                        urllib.parse.urljoin(_URL, f"video/{x.split('.mp4')[0]}.mp4"),
                        safe=":/",
                    )
                )
                .to_list()
                for split in ("train", "test")
            }
video_paths = {
"train": dl_manager.download(video_urls["train"]),
"test": dl_manager.download(video_urls["test"]),
}
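            # dl_manager.download resolves each URL to a local cached file and
            # preserves input order, so these path lists stay aligned with the
            # rows read from the corresponding list files in _generate_examples.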
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"list_path": list_paths["train"],
"frame_annotation_path": None,
"video_paths": video_paths["train"],
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"list_path": list_paths["test"],
"frame_annotation_path": annotation_path,
"video_paths": video_paths["test"],
},
),
]
def _generate_examples(self, list_path, frame_annotation_path, video_paths):
if self.config.name == "rgb":
raise NotImplementedError("rgb not implemented yet")
else:
ann_data = self._read_list(list_path, frame_annotation_path)
            for key, (path, annotation) in enumerate(zip(video_paths, ann_data)):
                video_id = annotation["id"]
                binary = annotation["binary_target"]
                multilabel = annotation["multilabel_target"]
                frame_annotations = annotation.get("frame_annotations", [])
                yield (
                    key,
                    {
                        "id": video_id,
                        "path": path,
                        "binary_target": binary,
                        "multilabel_target": multilabel,
                        "frame_annotations": frame_annotations,
                    },
                )
@staticmethod
def _read_list(list_path, frame_annotation_path):
file_list = pd.read_csv(
list_path, header=None, sep=" ", usecols=[0], names=["id"]
)
file_list["id"] = file_list["id"].apply(
lambda x: x.split("/")[1].split(".mp4")[0]
)
file_list["binary_target"], file_list["multilabel_target"] = zip(
*file_list["id"].apply(XDViolence._extract_labels)
)
if frame_annotation_path: # test set
id2frame_annotation = {}
url_components = urllib.parse.urlparse(frame_annotation_path)
is_url = url_components.scheme in ("http", "https")
if is_url:
with requests.get(frame_annotation_path, stream=True) as r:
r.raise_for_status()
                    for line in r.iter_lines():
                        if not line:  # iter_lines can yield empty keep-alive lines
                            continue
                        parts = line.decode("utf-8").strip().split(" ")
                        video_id = parts[0].split(".mp4")[0]
                        # Cast frame indices to int so they match the int32
                        # "frame_annotations" feature declared in _info().
                        id2frame_annotation[video_id] = [
                            {"start": int(parts[i]), "end": int(parts[i + 1])}
                            for i in range(1, len(parts), 2)
                        ]
            else:
                with open(frame_annotation_path, "r") as f:
                    for line in f:
                        if not line.strip():
                            continue
                        parts = line.strip().split(" ")
                        video_id = parts[0].split(".mp4")[0]
                        id2frame_annotation[video_id] = [
                            {"start": int(parts[i]), "end": int(parts[i + 1])}
                            for i in range(1, len(parts), 2)
                        ]
            file_list["frame_annotations"] = file_list["id"].apply(
                lambda x: id2frame_annotation.get(x, [])
            )
return file_list.to_dict("records")
@classmethod
    def _extract_labels(cls, video_id):
        """Extract the binary and multi-label targets from the label codes embedded in the video id."""
codes = video_id.split("_")[-1].split(".mp4")[0].split("-")
binary = 1 if len(codes) > 1 else 0
multilabel = [
cls.LABEL2IDX[cls.CODE2LABEL[code]] for code in codes if code != "0"
]
return binary, multilabel
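    # Illustration (hypothetical video ids following the naming scheme above):
    #   _extract_labels("Movie__#00-01-45_00-02-50_label_B1-G-0") -> (1, [1, 6])  # Fighting + Explosion
    #   _extract_labels("Movie__#00-01-45_00-02-50_label_A")      -> (0, [0])     # Normal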