Add Batch b906ba89-5572-4e6d-8ed6-4f227a5471d8
This view is limited to 50 files because it contains too many changes. See raw diff.
- abcnetrealtimescenetextspottingwithadaptivebeziercurvenetwork/bc67d115-01c1-4a35-9229-709c2970d9bb_content_list.json +3 -0
- abcnetrealtimescenetextspottingwithadaptivebeziercurvenetwork/bc67d115-01c1-4a35-9229-709c2970d9bb_model.json +3 -0
- abcnetrealtimescenetextspottingwithadaptivebeziercurvenetwork/bc67d115-01c1-4a35-9229-709c2970d9bb_origin.pdf +3 -0
- abcnetrealtimescenetextspottingwithadaptivebeziercurvenetwork/full.md +324 -0
- abcnetrealtimescenetextspottingwithadaptivebeziercurvenetwork/images.zip +3 -0
- abcnetrealtimescenetextspottingwithadaptivebeziercurvenetwork/layout.json +3 -0
- accurateestimationofbodyheightfromasingledepthimageviaafourstagedevelopingnetwork/d72a0b1a-eca1-44ca-822b-7bbb0b112b60_content_list.json +3 -0
- accurateestimationofbodyheightfromasingledepthimageviaafourstagedevelopingnetwork/d72a0b1a-eca1-44ca-822b-7bbb0b112b60_model.json +3 -0
- accurateestimationofbodyheightfromasingledepthimageviaafourstagedevelopingnetwork/d72a0b1a-eca1-44ca-822b-7bbb0b112b60_origin.pdf +3 -0
- accurateestimationofbodyheightfromasingledepthimageviaafourstagedevelopingnetwork/full.md +323 -0
- accurateestimationofbodyheightfromasingledepthimageviaafourstagedevelopingnetwork/images.zip +3 -0
- accurateestimationofbodyheightfromasingledepthimageviaafourstagedevelopingnetwork/layout.json +3 -0
- achievingrobustnessinthewildviaadversarialmixingwithdisentangledrepresentations/31e82398-e561-4b4c-a670-94e39e04ec8f_content_list.json +3 -0
- achievingrobustnessinthewildviaadversarialmixingwithdisentangledrepresentations/31e82398-e561-4b4c-a670-94e39e04ec8f_model.json +3 -0
- achievingrobustnessinthewildviaadversarialmixingwithdisentangledrepresentations/31e82398-e561-4b4c-a670-94e39e04ec8f_origin.pdf +3 -0
- achievingrobustnessinthewildviaadversarialmixingwithdisentangledrepresentations/full.md +435 -0
- achievingrobustnessinthewildviaadversarialmixingwithdisentangledrepresentations/images.zip +3 -0
- achievingrobustnessinthewildviaadversarialmixingwithdisentangledrepresentations/layout.json +3 -0
- acneattentivecontextnormalizationforrobustpermutationequivariantlearning/a7dc8fab-d5ca-4973-8a26-4820902b74f7_content_list.json +3 -0
- acneattentivecontextnormalizationforrobustpermutationequivariantlearning/a7dc8fab-d5ca-4973-8a26-4820902b74f7_model.json +3 -0
- acneattentivecontextnormalizationforrobustpermutationequivariantlearning/a7dc8fab-d5ca-4973-8a26-4820902b74f7_origin.pdf +3 -0
- acneattentivecontextnormalizationforrobustpermutationequivariantlearning/full.md +342 -0
- acneattentivecontextnormalizationforrobustpermutationequivariantlearning/images.zip +3 -0
- acneattentivecontextnormalizationforrobustpermutationequivariantlearning/layout.json +3 -0
- actbertlearninggloballocalvideotextrepresentations/54957bfe-d7fd-4f02-b231-53cc8e938b38_content_list.json +3 -0
- actbertlearninggloballocalvideotextrepresentations/54957bfe-d7fd-4f02-b231-53cc8e938b38_model.json +3 -0
- actbertlearninggloballocalvideotextrepresentations/54957bfe-d7fd-4f02-b231-53cc8e938b38_origin.pdf +3 -0
- actbertlearninggloballocalvideotextrepresentations/full.md +255 -0
- actbertlearninggloballocalvideotextrepresentations/images.zip +3 -0
- actbertlearninggloballocalvideotextrepresentations/layout.json +3 -0
- actionbyteslearningfromtrimmedvideostolocalizeactions/1ce4d7f8-88e8-47ce-bc2a-d47ca88948f2_content_list.json +3 -0
- actionbyteslearningfromtrimmedvideostolocalizeactions/1ce4d7f8-88e8-47ce-bc2a-d47ca88948f2_model.json +3 -0
- actionbyteslearningfromtrimmedvideostolocalizeactions/1ce4d7f8-88e8-47ce-bc2a-d47ca88948f2_origin.pdf +3 -0
- actionbyteslearningfromtrimmedvideostolocalizeactions/full.md +260 -0
- actionbyteslearningfromtrimmedvideostolocalizeactions/images.zip +3 -0
- actionbyteslearningfromtrimmedvideostolocalizeactions/layout.json +3 -0
- actiongenomeactionsascompositionsofspatiotemporalscenegraphs/5fd86401-96cc-4db0-97d1-3c9526ebc529_content_list.json +3 -0
- actiongenomeactionsascompositionsofspatiotemporalscenegraphs/5fd86401-96cc-4db0-97d1-3c9526ebc529_model.json +3 -0
- actiongenomeactionsascompositionsofspatiotemporalscenegraphs/5fd86401-96cc-4db0-97d1-3c9526ebc529_origin.pdf +3 -0
- actiongenomeactionsascompositionsofspatiotemporalscenegraphs/full.md +288 -0
- actiongenomeactionsascompositionsofspatiotemporalscenegraphs/images.zip +3 -0
- actiongenomeactionsascompositionsofspatiotemporalscenegraphs/layout.json +3 -0
- actionmodifierslearningfromadverbsininstructionalvideos/8c8a9e3d-94bb-4a0c-abcf-3812587bf4a4_content_list.json +3 -0
- actionmodifierslearningfromadverbsininstructionalvideos/8c8a9e3d-94bb-4a0c-abcf-3812587bf4a4_model.json +3 -0
- actionmodifierslearningfromadverbsininstructionalvideos/8c8a9e3d-94bb-4a0c-abcf-3812587bf4a4_origin.pdf +3 -0
- actionmodifierslearningfromadverbsininstructionalvideos/full.md +344 -0
- actionmodifierslearningfromadverbsininstructionalvideos/images.zip +3 -0
- actionmodifierslearningfromadverbsininstructionalvideos/layout.json +3 -0
- actionsegmentationwithjointselfsupervisedtemporaldomainadaptation/2e7f2736-36f2-4099-8fc8-db04ec170c4f_content_list.json +3 -0
- actionsegmentationwithjointselfsupervisedtemporaldomainadaptation/2e7f2736-36f2-4099-8fc8-db04ec170c4f_model.json +3 -0
abcnetrealtimescenetextspottingwithadaptivebeziercurvenetwork/bc67d115-01c1-4a35-9229-709c2970d9bb_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bcd078b49962938b8630e185377edfa51ab599dfdddfabd3efaff9045f312eaa
+size 74041
abcnetrealtimescenetextspottingwithadaptivebeziercurvenetwork/bc67d115-01c1-4a35-9229-709c2970d9bb_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad8d07895e8085ea61339d81ad95fd7f60bad1f26a0a2c7bb515d2ddcc7ad43c
+size 90998
abcnetrealtimescenetextspottingwithadaptivebeziercurvenetwork/bc67d115-01c1-4a35-9229-709c2970d9bb_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67ee52e4dfd9fe1e4fa543c16ffb7571f45dae31a5f5e9ae9372a83407c24667
+size 6321184
abcnetrealtimescenetextspottingwithadaptivebeziercurvenetwork/full.md
ADDED
@@ -0,0 +1,324 @@
# ABCNet: Real-time Scene Text Spotting with Adaptive Bezier-Curve Network*

Yuliang Liu†, Hao Chen†, Chunhua Shen†, Tong He†, Lianwen Jin‡, Liangwei Wang§

‡South China University of Technology †University of Adelaide, Australia §Huawei Noah's Ark Lab

# Abstract

Scene text detection and recognition has received increasing research attention. Existing methods can be roughly categorized into two groups: character-based and segmentation-based. These methods are either costly in character annotation or need to maintain a complex pipeline, which is often not suitable for real-time applications. Here we address the problem by proposing the Adaptive Bezier-Curve Network (ABCNet). Our contributions are three-fold: 1) For the first time, we adaptively fit oriented or curved text with a parameterized Bezier curve. 2) We design a novel BezierAlign layer for extracting accurate convolution features of a text instance with arbitrary shapes, significantly improving the precision compared with previous methods. 3) Compared with standard bounding box detection, our Bezier curve detection introduces negligible computation overhead, resulting in the superiority of our method in both efficiency and accuracy.

Experiments on oriented or curved benchmark datasets, namely Total-Text and CTW1500, demonstrate that ABCNet achieves state-of-the-art accuracy while significantly improving the speed. In particular, on Total-Text, our real-time version is over 10 times faster than recent state-of-the-art methods with competitive recognition accuracy.

Code is available at https://git.io/AdelaiDet.

# 1. Introduction

Scene text detection and recognition has received increasing attention due to its numerous applications in computer vision. Although tremendous progress has been made recently [10, 42, 28, 36, 27, 43, 45, 41, 46, 14], detecting and recognizing text in the wild remains largely unsolved due to the diversity of text patterns in size, aspect ratio, font style, perspective distortion, and shape. Although the emergence of deep learning has significantly improved the performance of scene text spotting, a considerable gap still exists between current methods and real-world applications, especially in terms of efficiency.

(a) Segmentation-based method.

(b) Our proposed ABCNet.

Figure 1. Segmentation-based results are easily affected by nearby text. The nonparametric, unstructured segmentation results make it very difficult to align features for the subsequent recognition branch. Segmentation-based results usually need complex post-processing, hampering efficiency. Benefiting from the parameterized Bezier curve representation, our ABCNet can produce structured detection regions, and thus the BezierAlign sampling process can naturally connect the recognition branch.

Recently, many end-to-end methods [31, 37, 34, 24, 44, 21] have significantly improved the performance of oriented or curved scene text spotting. However, these methods either use segmentation-based approaches that maintain a complex pipeline or require a large amount of expensive character-level annotations. In addition, almost all of these methods are slow at inference, hampering deployment in real-time applications. Thus, our motivation is to design a simple yet effective end-to-end framework for spotting oriented or curved scene text in images [4, 27], which ensures fast inference while achieving performance on par with or even better than state-of-the-art methods.

Figure 2. Overview of some end-to-end scene text spotting methods that are most relevant to ours. Inside the GT (ground-truth) box, 'W', 'R', and 'C' represent word-level annotation, text content, and character-level annotation, respectively. 'H', 'Q', and 'P' represent that the method is able to detect horizontal, quadrilateral, and oriented or curved text, respectively. 'RP' means that the method can recognize the curved text inside a quadrilateral box. 'R': recognition; 'BBox': bounding box. A dashed box represents a text shape which the method is unable to detect.

To achieve this goal, we propose the Adaptive Bezier Curve Network (ABCNet), an end-to-end trainable framework for oriented or curved scene text spotting. ABCNet enables oriented or curved scene text detection with Bezier curve adaptation, which introduces negligible computation overhead compared with standard rectangular bounding box detection. In addition, we design a novel feature alignment layer, BezierAlign, to precisely calculate convolutional features of text instances with curved shapes, and thus high recognition accuracy can be achieved without introducing much computation cost. For the first time, we represent oriented or curved text with parameterized Bezier curves, and the results show the effectiveness of our method. Examples of our spotting results are shown in Figure 1.

Note that previous methods such as TextAlign [11] and FOTS [25] can be viewed as special cases of ABCNet, because a quadrilateral bounding box can be seen as the simplest oriented or curved bounding box with 4 straight boundaries. In addition, ABCNet can avoid complicated transformations such as 2D attention [20], making the design of the recognition branch considerably simpler.

We summarize our main contributions as follows.

- In order to accurately localize oriented and curved scene text in images, for the first time, we introduce a new concise parametric representation of curved scene text using Bezier curves. It introduces negligible computation overhead compared with the standard bounding box representation.
- We propose a sampling method, a.k.a. BezierAlign, for accurate feature alignment, and thus the recognition branch can be naturally connected to the overall structure. By sharing backbone features, the recognition branch can be designed with a light-weight structure.
- The simplicity of our method allows it to perform inference in real time. ABCNet achieves state-of-the-art performance on two challenging datasets, Total-Text and CTW1500, demonstrating advantages in both effectiveness and efficiency.
# 1.1. Related Work

Scene text spotting requires detecting and recognizing text simultaneously instead of addressing only one of the two tasks. Recently, the emergence of deep-learning-based methods has significantly advanced the performance of text spotting; both detection and recognition have been dramatically improved. We summarize several representative deep-learning-based scene text spotting methods into the following two categories. Figure 2 shows an overview of typical works.

Regular End-to-end Scene Text Spotting Li et al. [19] propose the first deep-learning-based end-to-end trainable scene text spotting method. The method successfully uses RoI Pooling [35] to join detection and recognition features via a two-stage framework, but it can only spot horizontal and focused text. Its improved version [20] significantly improves the performance, but the speed is limited. He et al. [11] and Liu et al. [25] adopt an anchor-free mechanism to improve both the training and inference speed. They use similar sampling strategies, i.e., Text-Align-Sampling and RoI-Rotate, respectively, to enable feature extraction from quadrilateral detection results. Note that neither of these two methods is capable of spotting oriented or curved scene text.

Figure 3. The framework of the proposed ABCNet. We use cubic Bezier curves and BezierAlign to extract curved sequence features using the Bezier curve detection results. The overall framework is end-to-end trainable with high efficiency. Purple dots represent the control points of the cubic Bezier curve.

Oriented or Curved End-to-end Scene Text Spotting To detect oriented or curved scene text, Liao et al. [31] propose Mask TextSpotter, which subtly refines Mask R-CNN and uses character-level supervision to simultaneously detect and recognize characters and instance masks. The method significantly improves the performance of spotting oriented or curved scene text. However, character-level ground truth is expensive, and although it can be obtained freely from synthesized data, producing character-level ground truth for real data is hard in practice. The improved version [21] significantly alleviates the reliance on character-level ground truth. The method relies on a region proposal network, which restricts the speed to some extent. Sun et al. [37] propose TextNet, which produces quadrilateral detection bounding boxes in advance and then uses a region proposal network to feed the detection features for recognition. Although the method can directly recognize oriented or curved text from a quadrilateral detection, the performance is still limited.

Recently, Qin et al. [34] propose to use RoI Masking to focus on the oriented or curved text region. However, the results may easily be affected by outlier pixels. In addition, the segmentation branch increases the computation burden; the polygon fitting process also introduces extra time consumption; and the grouping result is usually jagged and not smooth. The work in [24] is the first one-stage oriented or curved scene text spotting method, requiring character-level ground truth data for training. The authors of [44] propose a novel sampling method, RoISlide, which uses fused features from the predicted segments of the text instances, and thus it is robust to long oriented or curved text.
# 2. Adaptive Bezier Curve Network (ABCNet)

ABCNet is an end-to-end trainable framework for spotting oriented or curved scene text. An intuitive pipeline can be seen in Figure 3. Inspired by [49, 38, 12], we adopt a single-shot, anchor-free convolutional neural network as the detection framework. Removing anchor boxes significantly simplifies the detection for our task. Here the detection is densely predicted on the output feature maps of the detection head, which is constructed by 4 stacked convolution layers with stride of 1, padding of 1, and $3 \times 3$ kernels. Next, we present the key components of the proposed ABCNet in two parts: 1) Bezier curve detection; and 2) BezierAlign and the recognition branch.

# 2.1. Bezier Curve Detection

Compared to segmentation-based methods [41, 46, 1, 39, 47, 29], regression-based methods are more direct solutions to oriented or curved text detection, e.g., [27, 43]. However, previous regression-based methods require complicated predictions to fit the text boundary, which is not very efficient or robust for the various text shapes encountered in practice.

To simplify oriented or curved scene text detection, following the regression methods, we find that the Bezier curve, a fundamental parametric curve representation, is suitable for parameterizing curved text. A Bezier curve is a parametric curve $c(t)$ that uses the Bernstein polynomials [30] as its basis. The definition is shown in Equation (1):

$$
c(t) = \sum_{i=0}^{n} b_i B_{i,n}(t), \quad 0 \leq t \leq 1, \tag{1}
$$

where $n$ is the degree, $b_i$ is the $i$-th control point, and $B_{i,n}(t)$ are the Bernstein basis polynomials, as shown in Equation (2):

$$
B_{i,n}(t) = \binom{n}{i} t^{i} (1 - t)^{n - i}, \quad i = 0, \dots, n, \tag{2}
$$

where $\binom{n}{i}$ is a binomial coefficient. To fit arbitrary shapes of text with Bezier curves, we comprehensively observe oriented or curved scene text in existing datasets and the real world, and we empirically show that a cubic Bezier curve (i.e., $n = 3$) is sufficient to fit the different kinds of oriented or curved scene text found in practice. An illustration of a cubic Bezier curve is shown in Figure 4.

Figure 4. Cubic Bezier curves. $b_{i}$ represents the control points. The green lines form a control polygon, and the black curve is the cubic Bezier curve. Note that with only two end-points $b_{1}$ and $b_{4}$ the Bezier curve degenerates to a straight line.
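To make Equations (1) and (2) concrete, here is a minimal NumPy sketch of cubic Bezier evaluation (our own illustration; the function names `bernstein` and `bezier_curve` are not from the released code):

```python
import math
import numpy as np

def bernstein(i: int, n: int, t: np.ndarray) -> np.ndarray:
    """Bernstein basis polynomial B_{i,n}(t) from Equation (2)."""
    return math.comb(n, i) * t**i * (1.0 - t)**(n - i)

def bezier_curve(control_points: np.ndarray, num_samples: int = 100) -> np.ndarray:
    """Evaluate c(t) from Equation (1) at num_samples values of t in [0, 1].

    control_points: (n+1, 2) array; a cubic curve (n = 3) has 4 points.
    Returns a (num_samples, 2) array of points on the curve.
    """
    n = len(control_points) - 1
    t = np.linspace(0.0, 1.0, num_samples)
    # One basis column per control point; each row sums to 1 (partition of unity).
    basis = np.stack([bernstein(i, n, t) for i in range(n + 1)], axis=1)
    return basis @ control_points  # weighted sum of control points

# Example: a gently curved baseline; the curve starts at b[0] and ends at b[-1].
b = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 1.0], [3.0, 0.0]])
curve = bezier_curve(b)
```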
Based on the cubic Bezier curve, we can simplify oriented or curved scene text detection to a bounding box regression with eight control points in total (four per long side). Note that straight text, which has four control points (four vertices), is a typical special case of oriented or curved scene text. For consistency, we insert two additional control points at the trisection points of each long side.

To learn the coordinates of the control points, we first generate the Bezier curve ground truth described in Section 2.1.1 and follow a regression method similar to [26] to regress the targets. For each text instance, we use

$$
\Delta_x = b_{ix} - x_{\min}, \quad \Delta_y = b_{iy} - y_{\min}, \tag{3}
$$

where $x_{\min}$ and $y_{\min}$ represent the minimum $x$ and $y$ values of the 4 vertices, respectively. The advantage of predicting the relative distance is that it does not matter whether the Bezier curve control points are beyond the image boundary. Inside the detection head, we only need one convolution layer with 16 output channels (8 control points $\times$ 2 offsets) to learn $\Delta_x$ and $\Delta_y$, which is nearly cost-free while the results can still be accurate, as discussed in Section 3.
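As a small illustration of the target encoding in Equation (3), the 16 regression targets for one instance can be built as follows (our own sketch; the assumption that the four vertices are the first and last control points of each long side is ours, not stated in the paper):

```python
import numpy as np

def bezier_regression_targets(control_points: np.ndarray) -> np.ndarray:
    """Encode 8 Bezier control points as 16 relative offsets (Equation (3)).

    control_points: (8, 2) array, four points per long side of the text,
    assumed ordered as top side b0..b3 then bottom side b4..b7.
    Returns a (16,) array of (dx, dy) offsets w.r.t. (x_min, y_min) of the
    four end-point vertices.
    """
    vertices = control_points[[0, 3, 4, 7]]          # the 4 corner vertices
    x_min, y_min = vertices[:, 0].min(), vertices[:, 1].min()
    offsets = control_points - np.array([x_min, y_min])
    return offsets.reshape(-1)                        # 8 points * 2 = 16 channels
```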
# 2.1.1 Bezier Ground Truth Generation

In this section, we briefly introduce how to generate the Bezier curve ground truth from the original annotations. The oriented or curved datasets, e.g., Total-Text [4] and CTW1500 [27], use polygonal annotations for the text regions. Given the annotated points $\{p_i\}_{i=1}^{m}$ from a curved boundary, where $p_i$ represents the $i$-th annotated point, the main goal is to obtain the optimal parameters of the cubic Bezier curve $c(t)$ in Equation (1). To achieve this, we can simply apply the standard least squares method, as shown in Equation (4):

$$
\begin{bmatrix} B_{0,3}(t_0) & \cdots & B_{3,3}(t_0) \\ B_{0,3}(t_1) & \cdots & B_{3,3}(t_1) \\ \vdots & \ddots & \vdots \\ B_{0,3}(t_m) & \cdots & B_{3,3}(t_m) \end{bmatrix} \begin{bmatrix} b_{x_0} & b_{y_0} \\ b_{x_1} & b_{y_1} \\ b_{x_2} & b_{y_2} \\ b_{x_3} & b_{y_3} \end{bmatrix} = \begin{bmatrix} p_{x_0} & p_{y_0} \\ p_{x_1} & p_{y_1} \\ \vdots & \vdots \\ p_{x_m} & p_{y_m} \end{bmatrix} \tag{4}
$$

Here $m$ represents the number of annotated points on a curved boundary; for Total-Text and CTW1500, $m$ is 5 and 7, respectively. $t$ is calculated as the ratio of the cumulative polyline length to the perimeter of the polyline. According to Equations (1) and (4), we convert the original polyline annotation to a parameterized Bezier curve. Note that we directly use the first and the last annotated points as the first $(b_0)$ and the last $(b_3)$ control points, respectively. A visual comparison is shown in Figure 5, which shows that the generated results can even be visually better than the original ground truth. In addition, based on the structured Bezier curve bounding box, we can easily use our BezierAlign, described in Section 2.2, to warp the curved text into a horizontal format without severe deformation. More examples of Bezier curve generation results are shown in Figure 6. The simplicity of our method allows it to generalize to different kinds of text in practice.

(a) Original ground truth.

(b) Generated results.

Figure 5. Comparison of Bezier curve generation. In Figure (b), for each curved boundary, the red dashed lines form a control polygon, and the red dots represent the control points. Warping results are shown below. In Figure (a), we utilize TPS [2] and STN [15] to warp the original ground truth into a rectangular shape. In Figure (b), we use the generated Bezier curves and our BezierAlign to warp the results.
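The fitting step in Equation (4) is a plain linear least-squares problem. The following NumPy sketch (ours, not the released implementation) computes $t$ from the cumulative chord-length ratio as described above and then solves for the four control points:

```python
import math
import numpy as np

def fit_cubic_bezier(points: np.ndarray) -> np.ndarray:
    """Least-squares fit of 4 cubic Bezier control points to a polyline.

    points: (m, 2) annotated boundary points, ordered along the curve.
    Returns: (4, 2) control points solving Equation (4).
    """
    # Parameterize each point by its cumulative chord-length ratio in [0, 1].
    seg = np.linalg.norm(np.diff(points, axis=0), axis=1)
    t = np.concatenate([[0.0], np.cumsum(seg)]) / seg.sum()

    # Bernstein design matrix: row k is [B_{0,3}(t_k), ..., B_{3,3}(t_k)].
    B = np.stack([math.comb(3, i) * t**i * (1 - t)**(3 - i) for i in range(4)],
                 axis=1)

    # Solve B @ control = points in the least-squares sense.
    control, *_ = np.linalg.lstsq(B, points, rcond=None)

    # Pin the end control points to the first/last annotated points.
    control[0], control[-1] = points[0], points[-1]
    return control
```

Pinning the end control points to the first and last annotated points, as the paper does, departs slightly from the unconstrained least-squares solution, but it guarantees that the curve terminates exactly at the annotation endpoints.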
# 2.1.2 Bezier Curve Synthetic Dataset

For end-to-end scene text spotting methods, a massive amount of free synthesized data is always necessary, as shown in Table 2. However, the existing 800k SynText dataset [7] only provides quadrilateral bounding boxes for mostly straight text. To diversify and enrich oriented or curved scene text, we synthesize a 150k-image dataset (94,723 images containing mostly straight text, and 54,327 images containing mostly curved text) with the VGG synthetic method [7]. Specifically, we filter out 40k text-free background images from COCO-Text [40] and then prepare the segmentation mask and scene depth of each background image with [33] and [18] for the subsequent text rendering. To enlarge the shape diversity of the synthetic text, we modify the VGG synthetic method by synthesizing scene text with various art fonts and corpora, and we generate polygonal annotations for all text instances. The annotations are then used to produce the Bezier curve ground truth by the method described in Section 2.1.1. Examples of our synthesized data are shown in Figure 8.

Figure 6. Example results of Bezier curve generation. Green lines are the final Bezier curve results. Red dashed lines represent the control polygon, and the 4 red end points represent the control points. Zoom in for better visualization.

(a) Horizontal sampling.

(b) Quadrilateral sampling.

(c) BezierAlign.

Figure 7. Comparison between previous sampling methods and BezierAlign. The proposed BezierAlign can accurately sample features of the text region, which is essential for recognition training. Note that the alignment procedure operates on intermediate convolutional features.

Figure 8. Examples of cubic Bezier curve synthesized data.

# 2.2. BezierAlign

To enable end-to-end training, most previous methods adopt various sampling (feature alignment) methods to connect the recognition branch. Typically, a sampling method represents an in-network region cropping procedure: given a feature map and a Region of Interest (RoI), the sampling method selects the features of the RoI and efficiently outputs a feature map of a fixed size. However, the sampling methods of previous non-segmentation-based methods, e.g., RoI Pooling [19], RoI-Rotate [25], Text-Align-Sampling [11], or RoI Transform [37], cannot properly align features of oriented or curved text (RoISlide [44] requires numerous predicted segments). By exploiting the parametric nature of a compact Bezier curve bounding box, we propose BezierAlign for feature sampling. BezierAlign is extended from RoIAlign [8]. Unlike RoIAlign, the sampling grid of BezierAlign is not rectangular. Instead, each column of the oriented or curved grid is orthogonal to the Bezier curve boundary of the text. The sampling points are equidistantly spaced in width and height, respectively, and their values are bilinearly interpolated with respect to the coordinates.

Formally, given an input feature map and the Bezier curve control points, we concurrently process all the output pixels of the rectangular output feature map of size $h_{out} \times w_{out}$. Taking the pixel $g_i$ at position $(g_{iw}, g_{ih})$ of the output feature map as an example, we calculate $t$ by Equation (5):

$$
t = \frac{g_{iw}}{w_{out}}. \tag{5}
$$

We then use $t$ and Equation (1) to calculate the point $tp$ on the upper Bezier curve boundary and the point $bp$ on the lower Bezier curve boundary. Using $tp$ and $bp$, we can linearly index the sampling point $op$ by Equation (6):

$$
op = bp \cdot \frac{g_{ih}}{h_{out}} + tp \cdot \left(1 - \frac{g_{ih}}{h_{out}}\right). \tag{6}
$$

With the position of $op$, we can easily apply bilinear interpolation to calculate the sampled value. Comparisons among previous sampling methods and BezierAlign are shown in Figure 7.
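Putting Equations (5) and (6) together, a sketch of the BezierAlign sampling-grid computation might look as follows (our own illustration, not the released implementation; the half-pixel offsets are a common convention we assume, not something the paper specifies):

```python
import numpy as np

def bezier_curve_at(ctrl: np.ndarray, t: np.ndarray) -> np.ndarray:
    """Evaluate a cubic Bezier curve (Equation (1)) at parameters t."""
    B = np.stack([(1 - t)**3, 3*t*(1 - t)**2, 3*t**2*(1 - t), t**3], axis=1)
    return B @ ctrl  # (len(t), 2)

def bezier_align_grid(top_ctrl: np.ndarray, bottom_ctrl: np.ndarray,
                      h_out: int, w_out: int) -> np.ndarray:
    """Compute the (h_out, w_out, 2) grid of sampling positions op.

    top_ctrl / bottom_ctrl: (4, 2) control points of the upper and lower
    Bezier boundaries of one text instance, in feature-map coordinates.
    """
    # Equation (5): one t per output column (with half-pixel centers).
    t = (np.arange(w_out) + 0.5) / w_out
    tp = bezier_curve_at(top_ctrl, t)      # (w_out, 2) upper boundary points
    bp = bezier_curve_at(bottom_ctrl, t)   # (w_out, 2) lower boundary points

    # Equation (6): linear interpolation between the two boundaries per row.
    alpha = ((np.arange(h_out) + 0.5) / h_out)[:, None, None]  # (h_out, 1, 1)
    return bp[None] * alpha + tp[None] * (1.0 - alpha)         # (h_out, w_out, 2)

# Each grid position would then be read from the feature map with bilinear
# interpolation (e.g., torch.nn.functional.grid_sample in PyTorch).
```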
Recognition branch. Benefiting from the shared backbone features and BezierAlign, we design a light-weight recognition branch, shown in Table 1, for faster execution. It consists of 6 convolutional layers, 1 bidirectional LSTM [13] layer, and 1 fully connected layer.

<table><tr><td>Layers (CNN - RNN)</td><td>Parameters (kernel size, stride)</td><td>Output Size (n, c, h, w)</td></tr><tr><td>conv layers × 4</td><td>(3, 1)</td><td>(n, 256, h, w)</td></tr><tr><td>conv layers × 2</td><td>(3, (2,1))</td><td>(n, 256, h, w)</td></tr><tr><td>average pool for h</td><td>-</td><td>(n, 256, 1, w)</td></tr><tr><td>Channels-Permute</td><td>-</td><td>(w, n, 256)</td></tr><tr><td>BLSTM</td><td>-</td><td>(w, n, 512)</td></tr><tr><td>FC</td><td>-</td><td>(w, n, nclass)</td></tr></table>

Table 1: Structure of the recognition branch, which is a simplified version of CRNN [36]. For all convolutional layers, the padding size is 1. $n$ represents the batch size and $c$ the channel size; $h$ and $w$ represent the height and width of the output feature map. $n_{class}$ represents the number of predicted classes, which is set to 97 in this paper: upper- and lower-case English characters, digits, symbols, one category representing all other symbols, and a final "EOF" category.

Based on the output classification scores, we use the classic CTC loss [6] for text string (GT) alignment. Note that during training, we directly use the generated Bezier curve GT to extract the RoI features; therefore, the detection branch does not affect the recognition branch. In the inference phase, the RoI region is replaced by the detected Bezier curve described in Section 2.1. Ablation studies in Section 3 demonstrate that the proposed BezierAlign can significantly improve the recognition performance.
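Reading Table 1 as code, a plausible PyTorch sketch of this recognition branch is given below (our own reconstruction; the activation functions and the exact placement of the two stride-(2,1) layers are assumptions, and only the layer counts and tensor shapes come from Table 1):

```python
import torch
import torch.nn as nn

class RecognitionBranch(nn.Module):
    """CRNN-style head following Table 1: 6 convs -> avg pool -> BLSTM -> FC."""

    def __init__(self, in_channels: int = 256, num_classes: int = 97):
        super().__init__()

        def conv(cin, cout, stride):
            return nn.Sequential(nn.Conv2d(cin, cout, 3, stride, 1),
                                 nn.ReLU(inplace=True))

        self.convs = nn.Sequential(
            *[conv(in_channels if i == 0 else 256, 256, 1) for i in range(4)],
            conv(256, 256, (2, 1)),   # stride (2, 1): downsample height only
            conv(256, 256, (2, 1)),
        )
        self.blstm = nn.LSTM(256, 256, bidirectional=True)  # outputs 512 dims
        self.fc = nn.Linear(512, num_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (n, 256, h, w) BezierAlign output.
        f = self.convs(x)
        f = f.mean(dim=2)        # average-pool over height -> (n, 256, w)
        f = f.permute(2, 0, 1)   # -> (w, n, 256), sequence-first for the LSTM
        f, _ = self.blstm(f)     # -> (w, n, 512)
        return self.fc(f)        # -> (w, n, num_classes) logits for CTC

# Training would apply nn.CTCLoss to the log-softmaxed outputs.
```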
# 3. Experiments

We evaluate our method on two recently introduced oriented or curved scene text benchmarks, Total-Text [3] and CTW1500 [27], which also contain a large amount of straight text. We also conduct ablation studies on Total-Text to verify the effectiveness of the proposed method.

# 3.1. Implementation details

The backbone of this paper follows the common setting of most previous papers, i.e., ResNet-50 [9] together with a Feature Pyramid Network (FPN) [23]. For the detection branch, we utilize RoIAlign on 5 feature maps at 1/8, 1/16, 1/32, 1/64, and 1/128 of the input image resolution, while for the recognition branch, BezierAlign is conducted on three feature maps at 1/4, 1/8, and 1/16 of the input size. The pretraining data is collected from publicly available English word-level datasets: the 150k synthesized images described in Section 2.1.2, 15k images filtered from the original COCO-Text [40], and 7k ICDAR-MLT images [32]. The pretrained model is then fine-tuned on the training set of the target dataset. In addition, we adopt data augmentation strategies, e.g., random scale training, with the short size randomly chosen from 560 to 800 and the long size kept below 1333, and random cropping, where we make sure that the crop size is larger than half of the original size and no text is cut (for some special cases where this condition is hard to meet, we do not apply random cropping).

We train our model using 4 Tesla V100 GPUs with an image batch size of 32. The maximum number of iterations is 150K; the initial learning rate is 0.01, which is reduced to 0.001 at the 70K-th iteration and 0.0001 at the 120K-th iteration. The whole training process takes about 3 days.
# 3.2. Experimental results on Total-Text

Dataset. The Total-Text dataset [3] is one of the most important oriented or curved scene text benchmarks, proposed in 2017. It was collected from various scenes, including scenes with text-like complexity and low-contrast backgrounds. It contains 1,555 images, with 1,255 for training and 300 for testing. To resemble real-world scenarios, most images in this dataset contain a large amount of regular text, while it is guaranteed that each image has at least one curved text instance. Each text instance is annotated with a word-level polygon. The extended version [4] improves the training-set annotation by annotating each text instance with a fixed ten points following the text recognition sequence. The dataset contains English text only. To evaluate the end-to-end results, we follow the same metric as previous methods, which use the F-measure to measure word accuracy.

Ablation studies: BezierAlign. To evaluate the effectiveness of the proposed components, we conduct ablation studies on this dataset. We first conduct a sensitivity analysis of how the number of sampling points affects the end-to-end results, shown in Table 4. The results show that the number of sampling points can significantly affect the final performance and efficiency. We find that (7, 32) achieves the best trade-off between F-measure and FPS, and we use it as the final setting in the following experiments. We further evaluate BezierAlign by comparing it with the previous sampling methods shown in Figure 7. The results in Table 3 demonstrate that BezierAlign can dramatically improve the end-to-end results. Qualitative examples are shown in Figure 9.

Ablation studies: Bezier curve detection. Another important component is Bezier curve detection, which enables oriented or curved scene text detection. We therefore also conduct experiments to evaluate the time consumption of Bezier curve detection. The results in Table 5 show that Bezier curve detection introduces no extra computation compared with standard bounding box detection.

Comparison with state-of-the-art. We further compare our method with previous methods. From Table 2, we can see that our single-scale result (short size of 800) achieves competitive performance while running in real time, resulting in a better trade-off between speed and word accuracy. With multi-scale inference, ABCNet achieves state-of-the-art performance, significantly outperforming all previous methods, especially in running time. It is worth mentioning that our faster version can be more than 11 times faster than the previous best method [21] with on-par accuracy.

<table><tr><td rowspan="2">Method</td><td rowspan="2">Data</td><td rowspan="2">Backbone</td><td colspan="2">F-measure</td><td rowspan="2">FPS</td></tr><tr><td>None</td><td>Full</td></tr><tr><td>TextBoxes [22]</td><td>SynText800k, IC13, IC15, TT</td><td>ResNet-50-FPN</td><td>36.3</td><td>48.9</td><td>1.4</td></tr><tr><td>Mask TextSpotter'18 [31]</td><td>SynText800k, IC13, IC15, TT</td><td>ResNet-50-FPN</td><td>52.9</td><td>71.8</td><td>4.8</td></tr><tr><td>Two-stage [37]</td><td>SynText800k, IC13, IC15, TT</td><td>ResNet-50-SAM</td><td>45.0</td><td>-</td><td>-</td></tr><tr><td>TextNet [37]</td><td>SynText800k, IC13, IC15, TT</td><td>ResNet-50-SAM</td><td>54.0</td><td>-</td><td>2.7</td></tr><tr><td>Li et al. [20]</td><td>SynText840k, IC13, IC15, TT, MLT, AddF2k</td><td>ResNet-101-FPN</td><td>57.80</td><td>-</td><td>1.4</td></tr><tr><td>Mask TextSpotter'19 [21]</td><td>SynText800k, IC13, IC15, TT, AddF2k</td><td>ResNet-50-FPN</td><td>65.3</td><td>77.4</td><td>2.0</td></tr><tr><td>Qin et al. [34]</td><td>SynText200k, IC15, COCO-Text, TT, MLT; Private: 30k (manual label), 1M (partial label)</td><td>ResNet-50-MSF</td><td>67.8</td><td>-</td><td>4.8</td></tr><tr><td>CharNet [24]</td><td>SynText800k, IC15, MLT, TT</td><td>ResNet-50-Hourglass57</td><td>66.2</td><td>-</td><td>1.2</td></tr><tr><td>TextDragon [44]</td><td>SynText800k, IC15, TT</td><td>VGG16</td><td>48.8</td><td>74.8</td><td>-</td></tr><tr><td>ABCNet-F</td><td rowspan="3">SynText150k, COCO-Text, TT, MLT</td><td rowspan="3">ResNet-50-FPN</td><td>61.9</td><td>74.1</td><td>22.8</td></tr><tr><td>ABCNet</td><td>64.2</td><td>75.7</td><td>17.9</td></tr><tr><td>ABCNet-MS</td><td>69.5</td><td>78.4</td><td>6.9</td></tr></table>

Table 2: Scene text spotting results on Total-Text. ABCNet-F is faster as the short size of the input image is 600. MS: multi-scale testing. "None" represents recognition without any lexicon. The "Full" lexicon contains all words in the test set. Datasets: AddF2k [48]; IC13 [17]; IC15 [16]; TT [5]; MLT [32]; COCO-Text [40].

<table><tr><td>Methods</td><td>Sampling method</td><td>F-measure (%)</td></tr><tr><td>ABCNet</td><td>with Horizontal Sampling</td><td>38.4</td></tr><tr><td>ABCNet</td><td>with Quadrilateral Sampling</td><td>44.7</td></tr><tr><td>ABCNet</td><td>with BezierAlign</td><td>61.9</td></tr></table>

Table 3: Ablation study for BezierAlign. Horizontal sampling follows [19], and quadrilateral sampling follows [11].

Figure 9. Qualitative recognition results of the quadrilateral sampling method and BezierAlign. Left: original image. Top right: results using quadrilateral sampling. Bottom right: results using BezierAlign.

<table><tr><td>Method</td><td>Sampling points (nh, nw)</td><td>F-measure (%)</td><td>FPS</td></tr><tr><td rowspan="6">ABCNet</td><td>(6,32)</td><td>59.6</td><td>23.2</td></tr><tr><td>(7,32)</td><td>61.9</td><td>22.8</td></tr><tr><td>(14,64)</td><td>58.1</td><td>19.9</td></tr><tr><td>(21,96)</td><td>54.8</td><td>18.0</td></tr><tr><td>(28,128)</td><td>53.4</td><td>15.1</td></tr><tr><td>(30,30)</td><td>59.9</td><td>21.4</td></tr></table>

Table 4: Ablation study of the number of sampling points of BezierAlign.

<table><tr><td>Methods</td><td>Inference time</td></tr><tr><td>without Bezier curve detection</td><td>22.8 fps</td></tr><tr><td>with Bezier curve detection</td><td>22.5 fps</td></tr></table>

Table 5: Ablation study of the time consumption of Bezier curve detection.
Qualitative results. Some qualitative results of ABCNet are shown in Figure 10. They show that our method can accurately detect and recognize most oriented or curved text. In addition, our method also handles straight text well, producing nearly quadrilateral compact bounding boxes and correct recognition results. Some errors are also visualized in the figure; they are mainly caused by mis-recognizing one of the characters.

# 3.3. Experimental Results on CTW1500

Dataset. CTW1500 [27] is another important oriented or curved scene text benchmark, proposed in 2017. Compared to Total-Text, this dataset contains both English and Chinese text. In addition, the annotation is at the text-line level, and the dataset also includes some document-like text, i.e., numerous small text instances may be stacked together. CTW1500 contains 1k training images and 500 testing images.

Experiments. Because the proportion of Chinese text in this dataset is very small, we directly regard all Chinese text as an "unseen" class during training, i.e., the 96-th class; note that the last, 97-th class is "EOF" in our implementation. We follow the same evaluation metric as [44]. The experimental results are reported in Table 6, which demonstrates that in terms of end-to-end scene text spotting, ABCNet can significantly surpass previous state-of-the-art methods. Example results on this dataset are shown in Figure 11. From the figure, we can see that some long text-line instances contain many words, which makes full-match word accuracy extremely difficult: incorrectly recognizing one character results in a zero score for the whole text.

Figure 10. Qualitative results of ABCNet on Total-Text. The detection results are shown with red bounding boxes. The floating-point number is the predicted confidence. Zoom in for better visualization.

<table><tr><td rowspan="2">Methods</td><td rowspan="2">Data</td><td colspan="2">F-measure</td></tr><tr><td>None</td><td>Strong Full</td></tr><tr><td>FOTS [25]</td><td>SynText800k, CTW1500</td><td>21.1</td><td>39.7</td></tr><tr><td>Two-Stage* [44]</td><td>SynText800k, CTW1500</td><td>37.2</td><td>69.9</td></tr><tr><td>RoIRotate* [44]</td><td>SynText800k, CTW1500</td><td>38.6</td><td>70.9</td></tr><tr><td>LSTM* [44]</td><td>SynText800k, CTW1500</td><td>39.2</td><td>71.5</td></tr><tr><td>TextDragon [44]</td><td>SynText800k, CTW1500</td><td>39.7</td><td>72.4</td></tr><tr><td>ABCNet</td><td>SynText150k, CTW1500</td><td>45.2</td><td>74.1</td></tr></table>

Table 6: End-to-end scene text spotting results on CTW1500. * represents results taken from [44]. "None" represents lexicon-free. "Strong Full" represents that we use all the words appearing in the test set.

Figure 11. Qualitative end-to-end spotting results on CTW1500. Better viewed on screen.

# 4. Conclusion

We have proposed ABCNet, a real-time end-to-end method that uses Bezier curves for oriented or curved scene text spotting. By reformulating oriented or curved scene text with parameterized Bezier curves, ABCNet can detect oriented or curved scene text with negligible computation cost compared with standard bounding box detection. With such regular Bezier curve bounding boxes, we can naturally connect a light-weight recognition branch via the new BezierAlign layer.

In addition, using our Bezier curve synthesized dataset and publicly available data, experiments on two oriented or curved scene text benchmarks (Total-Text and CTW1500) demonstrate that ABCNet achieves state-of-the-art performance while being significantly faster than previous methods.
# Acknowledgements

L. Jin's participation was in part supported by NSFC (Grant No. 61936003), the National Key Research and Development Program of China (No. 2016YFB1001405), and GD-NSF (No. 2017A030312006). The authors would like to thank Huawei Technologies for the donation of GPU cloud computing resources.

# References

[1] Youngmin Baek, Bado Lee, Dongyoon Han, Sangdoo Yun, and Hwalsuk Lee. Character region awareness for text detection. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 9365-9374, 2019.
[2] Fred L. Bookstein. Principal warps: Thin-plate splines and the decomposition of deformations. IEEE Trans. Pattern Anal. Mach. Intell., 11(6):567-585, 1989.
[3] C.-K. Chng and C.-S. Chan. Total-text: A comprehensive dataset for scene text detection and recognition. In Proc. IAPR Int. Conf. Document Analysis Recog., pages 935-942, 2017.
[4] Chee-Kheng Chng, Chee Seng Chan, and Cheng-Lin Liu. Total-text: toward orientation robustness in scene text detection. Int. J. Document Analysis Recogn., pages 1-22, 2019.
[5] Chee-Kheng Chng, Yuliang Liu, Yipeng Sun, Chun Chet Ng, Canjie Luo, Zihan Ni, ChuanMing Fang, Shuaiqiao Zhang, Junyu Han, Errui Ding, et al. ICDAR2019 robust reading challenge on arbitrary-shaped text (RRC-ArT). In Proc. IAPR Int. Conf. Document Analysis Recog., 2019.
[6] Alex Graves, Santiago Fernández, Faustino Gomez, and Jürgen Schmidhuber. Connectionist temporal classification: labelling unsegmented sequence data with recurrent neural networks. In Proc. Int. Conf. Mach. Learn., pages 369-376. ACM, 2006.
[7] Ankush Gupta, Andrea Vedaldi, and Andrew Zisserman. Synthetic data for text localisation in natural images. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 2315-2324, 2016.
[8] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask R-CNN. In Proc. IEEE Int. Conf. Comp. Vis., 2017.
[9] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 770-778, 2016.
[10] Tong He, Weilin Huang, Yu Qiao, and Jian Yao. Text-attentional convolutional neural network for scene text detection. IEEE Trans. Image Process., 25(6):2529-2541, 2016.
[11] Tong He, Zhi Tian, Weilin Huang, Chunhua Shen, Yu Qiao, and Changming Sun. An end-to-end textspotter with explicit alignment and attention. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 5020-5029, 2018.
[12] Wenhao He, Xu-Yao Zhang, Fei Yin, and Cheng-Lin Liu. Deep direct regression for multi-oriented scene text detection. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., 2017.
[13] Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural Computation, 9(8):1735-1780, 1997.
[14] Zhida Huang, Zhuoyao Zhong, Lei Sun, and Qiang Huo. Mask R-CNN with pyramid attention network for scene text detection. In Winter Conf. Appl. Comp. Vision, pages 764-772. IEEE, 2019.
[15] Max Jaderberg, Karen Simonyan, Andrew Zisserman, et al. Spatial transformer networks. In Proc. Advances in Neural Inf. Process. Syst., pages 2017-2025, 2015.
[16] D. Karatzas, L. Gomez-Bigorda, et al. ICDAR 2015 competition on robust reading. In Proc. IAPR Int. Conf. Document Analysis Recog., pages 1156-1160, 2015.
[17] D. Karatzas, F. Shafait, S. Uchida, et al. ICDAR 2013 robust reading competition. In Proc. IAPR Int. Conf. Document Analysis Recog., pages 1484-1493, 2013.
[18] Iro Laina, Christian Rupprecht, Vasileios Belagiannis, Federico Tombari, and Nassir Navab. Deeper depth prediction with fully convolutional residual networks. In Proc. Int. Conf. 3D Vision (3DV), pages 239-248. IEEE, 2016.
[19] Hui Li, Peng Wang, and Chunhua Shen. Towards end-to-end text spotting with convolutional recurrent neural networks. In Proc. IEEE Int. Conf. Comp. Vis., pages 5238-5246, 2017.
[20] Hui Li, Peng Wang, and Chunhua Shen. Towards end-to-end text spotting in natural scenes. arXiv: Comp. Res. Repository, 2019.
[21] Minghui Liao, Pengyuan Lyu, Minghang He, Cong Yao, Wenhao Wu, and Xiang Bai. Mask TextSpotter: An end-to-end trainable neural network for spotting text with arbitrary shapes. IEEE Trans. Pattern Anal. Mach. Intell., 2019.
[22] Minghui Liao, Baoguang Shi, Xiang Bai, Xinggang Wang, and Wenyu Liu. TextBoxes: A fast text detector with a single deep neural network. In Proc. AAAI Conf. Artificial Intell., 2017.
[23] Tsung-Yi Lin, Piotr Dollár, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 2117-2125, 2017.
[24] Linjie Xing, Zhi Tian, Weilin Huang, and Matthew R. Scott. Convolutional character networks. In Proc. IEEE Int. Conf. Comp. Vis., 2019.
[25] Xuebo Liu, Ding Liang, Shi Yan, Dagui Chen, Yu Qiao, and Junjie Yan. FOTS: Fast oriented text spotting with a unified network. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 5676-5685, 2018.
[26] Yuliang Liu and Lianwen Jin. Deep matching prior network: Toward tighter multi-oriented text detection. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., 2017.
[27] Yuliang Liu, Lianwen Jin, Shuaiqiao Zhang, Canjie Luo, and Sheng Zhang. Curved scene text detection via transverse and longitudinal sequence connection. Pattern Recognition, 90:337-345, 2019.
[28] Yuliang Liu, Sheng Zhang, Lianwen Jin, Lele Xie, Yaqiang Wu, and Zhepeng Wang. Omnidirectional scene text detection with sequential-free box discretization. In Proc. Int. Joint Conf. Artificial Intell., 2019.
[29] Shangbang Long, Jiaqiang Ruan, Wenjie Zhang, Xin He, Wenhao Wu, and Cong Yao. TextSnake: A flexible representation for detecting text of arbitrary shapes. In Proc. Eur. Conf. Comp. Vis., pages 20-36, 2018.
[30] George G. Lorentz. Bernstein Polynomials. American Mathematical Soc., 2013.
[31] Pengyuan Lyu, Minghui Liao, Cong Yao, Wenhao Wu, and Xiang Bai. Mask TextSpotter: An end-to-end trainable neural network for spotting text with arbitrary shapes. In Proc. Eur. Conf. Comp. Vis., pages 67-83, 2018.
[32] Nibal Nayef, Yash Patel, Michal Busta, Pinaki Nath Chowdhury, Dimosthenis Karatzas, Wafa Khlif, Jiri Matas, Umapada Pal, Jean-Christophe Burie, Cheng-lin Liu, et al. ICDAR2019 robust reading challenge on multi-lingual scene text detection and recognition (RRC-MLT-2019). In Proc. IAPR Int. Conf. Document Analysis Recog., 2019.
[33] Jordi Pont-Tuset, Pablo Arbelaez, Jonathan T. Barron, Ferran Marques, and Jitendra Malik. Multiscale combinatorial grouping for image segmentation and object proposal generation. IEEE Trans. Pattern Anal. Mach. Intell., 39(1):128-140, 2016.
[34] Siyang Qin, Alessandro Bissacco, Michalis Raptis, Yasuhisa Fujii, and Ying Xiao. Towards unconstrained end-to-end text spotting. In Proc. IEEE Int. Conf. Comp. Vis., 2019.
[35] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster R-CNN: Towards real-time object detection with region proposal networks. In Proc. Advances in Neural Inf. Process. Syst., pages 91-99, 2015.
[36] Baoguang Shi, Xiang Bai, and Cong Yao. An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition. IEEE Trans. Pattern Anal. Mach. Intell., 39(11):2298-2304, 2016.
[37] Yipeng Sun, Chengquan Zhang, Zuming Huang, Jiaming Liu, Junyu Han, and Errui Ding. TextNet: Irregular text reading from images with an end-to-end trainable network. In Proc. Asian Conf. Comp. Vis., pages 83-99. Springer, 2018.
[38] Zhi Tian, Chunhua Shen, Hao Chen, and Tong He. FCOS: Fully convolutional one-stage object detection. In Proc. IEEE Int. Conf. Comp. Vis., 2019.
[39] Zhuotao Tian, Michelle Shu, Pengyuan Lyu, Ruiyu Li, Chao Zhou, Xiaoyong Shen, and Jiaya Jia. Learning shape-aware embedding for scene text detection. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 4234-4243, 2019.
[40] Andreas Veit, Tomas Matera, Lukas Neumann, Jiri Matas, and Serge Belongie. COCO-Text: Dataset and benchmark for text detection and recognition in natural images. arXiv: Comp. Res. Repository, 2016.
[41] Wenhai Wang, Enze Xie, Xiang Li, Wenbo Hou, Tong Lu, Gang Yu, and Shuai Shao. Shape robust text detection with progressive scale expansion network. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., 2019.
[42] Wenhai Wang, Enze Xie, Xiaoge Song, Yuhang Zang, Wenjia Wang, Tong Lu, Gang Yu, and Chunhua Shen. Efficient and accurate arbitrary-shaped text detection with pixel aggregation network. In Proc. IEEE Int. Conf. Comp. Vis., 2019.
[43] Xiaobing Wang, Yingying Jiang, Zhenbo Luo, Cheng-Lin Liu, Hyunsoo Choi, and Sungjin Kim. Arbitrary shape scene text detection with adaptive text region representation. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 6449-6458, 2019.
[44] Wei Feng, Wenhao He, Fei Yin, Xu-Yao Zhang, and Cheng-Lin Liu. TextDragon: An end-to-end framework for arbitrary shaped text spotting. In Proc. IEEE Int. Conf. Comp. Vis., 2019.
[45] Zecheng Xie, Yaoxiong Huang, Yuzhi Zhu, Lianwen Jin, Yuliang Liu, and Lele Xie. Aggregation cross-entropy for sequence recognition. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 6538-6547, 2019.
[46] Yongchao Xu, Yukang Wang, Wei Zhou, Yongpan Wang, Zhibo Yang, and Xiang Bai. TextField: Learning a deep direction field for irregular scene text detection. IEEE Trans. Image Process., 2019.
[47] Chengquan Zhang, Borong Liang, Zuming Huang, Mengyi En, Junyu Han, Errui Ding, and Xinghao Ding. Look more than once: An accurate detector for text of arbitrary shapes. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., 2019.
[48] Zhuoyao Zhong, Lianwen Jin, Shuye Zhang, and Ziyong Feng. DeepText: A unified framework for text proposal generation and text detection in natural images. arXiv: Comp. Res. Repository, 2016.
[49] Zhuoyao Zhong, Lei Sun, and Qiang Huo. An anchor-free region proposal network for Faster R-CNN-based text detection approaches. Int. J. Document Analysis Recogn., 22(3):315-327, 2019.
abcnetrealtimescenetextspottingwithadaptivebeziercurvenetwork/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c9667281c653b8bd9f1703a3aeac9e2f2b7297742b2a6502aca30ca68c3a496
+size 1045650
abcnetrealtimescenetextspottingwithadaptivebeziercurvenetwork/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c3416735dcbff61a41f4a20843b849252cd9313559a17ecef219cc56c6401b0
+size 361046
accurateestimationofbodyheightfromasingledepthimageviaafourstagedevelopingnetwork/d72a0b1a-eca1-44ca-822b-7bbb0b112b60_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f502276886aecd63d79031c22e824eb314f6ccf10ff11602c7e2c42bec1d0f9
+size 71813
accurateestimationofbodyheightfromasingledepthimageviaafourstagedevelopingnetwork/d72a0b1a-eca1-44ca-822b-7bbb0b112b60_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15df22d40ed69b52cd178d3c0f79bdae2a2dfa34fbb3a7a13a3b7e599739439a
+size 88536
accurateestimationofbodyheightfromasingledepthimageviaafourstagedevelopingnetwork/d72a0b1a-eca1-44ca-822b-7bbb0b112b60_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2431ddc225fb684e811942886043089705a67baba334294458db4274d412f2a6
+size 475167
accurateestimationofbodyheightfromasingledepthimageviaafourstagedevelopingnetwork/full.md
ADDED
@@ -0,0 +1,323 @@
| 1 |
+
# Accurate Estimation of Body Height from a Single Depth Image via a Four-Stage Developing Network

Fukun Yin* Shizhe Zhou*

College of Computer Science and Electronic Engineering,

Hunan University, Changsha, China

* joint first authors: yfk@hnu.edu.cn, shizhe@hnu.edu.cn (corresponding author)

# Abstract

Non-contact measurement of human body height can be very difficult under some circumstances. In this paper we address the problem of accurately estimating the height of a person in an arbitrary posture from a single depth image. By introducing a novel part-based intermediate representation plus a four-stage, increasingly complex deep neural network, we achieve significantly higher accuracy than previous methods. We first describe the human body as a segmentation of the torso into four nearly rigid parts, and then predict their lengths with three CNNs. Instead of directly adding the lengths of these parts together, we construct a further, independent developing CNN that combines the intermediate representation, the part lengths and the depth information to predict the final body height. Here we develop an increasingly complex network architecture and adopt a hybrid pooling strategy to optimize the training process. To the best of our knowledge, this is the first method that estimates height from a single depth image alone. In our experiments the average accuracy reaches $99.1\%$ for people in various positions and postures.
# 1. Introduction

In the fields of three-dimensional reconstruction, medical treatment, clothes sizing, etc., human height data is indispensable. In most cases the subject is required to stand up straight while the height is measured with a tape measure or other tools, which consumes a lot of time and manpower. In practical applications, measurement becomes especially hard when measuring tools are unavailable, or when the subject is a child or an injured person who cannot stand up straight. Our method effectively solves these problems, as it needs only a single depth image and outputs reliable results with an average accuracy of $99.1\%$ in milliseconds, saving a lot of manpower and time. More importantly, we do not require the measured person to stand in a fixed place or in a standard posture: subjects can take various postures such as walking, bending or sitting, anywhere within the valid range of the depth camera.

We propose a novel method for estimating human body height from a single depth image. We first use a Kinect [32] to capture an RGB-D image, but discard the color data and use only the depth image to estimate the height, because we find that prediction from the depth image alone is more accurate than prediction from RGB or RGB-D input. Secondly, we segment the human torso from the depth image using an FCN [25]. To make the edge information more accurate, we enhance the depth image with its high-frequency information. After obtaining the torso image, we propose a novel part-based intermediate representation: the torso image and the depth image are fed into the same network architecture to obtain a body-parts segmentation that divides the human torso into four local parts: head, upper body, thigh and calf. We verify that this intermediate representation significantly improves the estimation accuracy. We then modify the fully connected layers of VGG16 [35] to make them better suited to our problem, and feed the body-parts segmentation and depth information into this network to obtain intermediate estimates of the lengths of the four body parts. Finally, on top of this intermediate representation, we design a novel network architecture for the final height prediction.

Even though complex neural networks have long training times, large numbers of parameters, and are prone to overfitting [6][15][20][30], they can generally fit the data better than shallow networks, so their predictions may be more accurate. We therefore propose an increasingly complex deep neural network, which we call a developing network architecture. At the beginning of training we use only a small number of convolutional layers and output the predicted values through the fully connected layers. Once the network has roughly converged, we add new convolutional layers and continue training, and we repeat this process until the network estimates accurately. Experiments show that a network trained by this process is more accurate than one trained directly. At the same time, to match the different characteristics of the segmentation image and the depth image, we adopt different pooling strategies, and we use the skip-connection structure [14] to pass this information to each block without loss, see Figure 6, which further improves the estimation accuracy significantly.

Figure 1. The workflow of our approach. We use a four-stage neural network to estimate human body height from a single depth image.

Our main working steps are as follows. Firstly, the human torso is segmented from the depth image, and an intermediate representation, the body-parts segmentation image, is constructed, which further divides the human torso into four local parts: head, upper body, thigh and calf. After that, the lengths of these four parts are predicted separately. Finally, a novel developing network architecture is devised and trained with a hybrid pooling strategy. The overall pipeline is shown in Figure 1.
Our main contributions are as follows:

1. We construct a new dataset of human body heights comprising 2136 RGB-D images covering ten postures such as standing, walking, sitting and bending. The human body can be located at any position within the range captured by the depth camera; Figure 2 shows some examples from our dataset.

2. To the best of our knowledge, this is the first method to predict human height from a single depth image. We only need a commodity depth camera without extra equipment, which reduces the overall equipment expenditure in practical applications. Using only depth data, we achieve higher accuracy than using depth plus RGB, see Section 4.4, an interesting fact contradictory to the case of 3D reconstruction, where depth+RGB has been shown to be the better input [9] [11].

3. We verify how the intermediate representation makes the final network easier to learn. We construct a human body-parts segmentation image and estimate the lengths of the head, upper body, thigh and calf respectively. Compared with a pipeline without the intermediate representation, the prediction accuracy is greatly improved.

4. We put forward a network architecture whose complexity grows gradually over the iterations, which eases the difficult problem of network initialization and significantly improves the accuracy.
# 2. Related Work

In the field of human height estimation from images or videos there is not much previous work, especially in recent years. We divide the existing methods into the following three groups.

# 2.1. Single-view Based Prediction

Single-view height prediction methods are the comparatively simpler methods in current height measurement. They commonly rely on camera calibration and reference objects, using an RGB or RGB-D acquisition device that captures images from a fixed angle. Penders et al. [28] propose a reference-based method that fixes the camera at a chosen position and keeps the distance between the camera and the subject, who is required to stand close to a wall, constant; the measured head-to-feet distance is then converted into the actual height using a reference measurement. Criminisi et al. [8] propose using vanishing lines and vanishing points to calibrate the camera, which eliminates the need for intrinsic camera parameters. Building on [8], Lee [19] adds a cube to the image and uses genetic algorithms to improve robustness. In [2], [10] and [12], a heterogeneous approach is presented that uses neither calibration nor references, instead exploiting proportional relationships between body parts for estimation.

# 2.2. Multi-view Based Prediction

Multi-view prediction methods can be roughly categorized into shooting from multiple angles with a single camera, taking multiple photos with a fixed camera, or shooting with multiple cameras at different angles. Three-dimensional reconstruction is used to estimate human body data in [21] and [24], which build a three-dimensional model from multi-angle photographs and then estimate body measurements using cubic splines. Li et al. [23] use a home camera and a simple rotating disc to collect body images from different angles, then perform a 3D reconstruction and refine the resulting model to estimate body measurements. Hung et al. [16] collect the front, back and side views of the human body, and then calculate the body height by placing a standard reference.

# 2.3. Video-based Prediction

Video-based methods are also a common means of height prediction. Compared with the two approaches above, they can automatically and accurately segment the human body from the background before estimating the height. In [4], [17] and [18] the collected video sequence is used to separate the background, feature points of the head and feet are extracted, and the actual height is finally computed via camera calibration or a reference method. Li et al. [22] adopt a non-linear model to estimate the focal length, inclination and height of the camera, which removes noisy interference during camera calibration. Shao et al. [33] use the moving objects in the tracked scene to recover a minimal calibration of the scene, apply the single-frame prediction method of [8] to each frame, and finally combine all single-frame results into the final multi-frame prediction.

Although there have been many studies on estimating height from images or videos in recent years, previous methods have notable limitations. Most can only handle postures such as walking and standing, or require the subject to stand at a specified position. Some need manual labeling of the head and feet, which is not fully automated and requires a lot of manpower. Others use multiple photos or multiple devices, which costs more time and money. We aim at a fully automated height measurement method that requires only one depth camera and handles various postures, including challenging ones such as sitting and walking.
# 3. Method

This paper investigates how to estimate the height of a human body from a single depth image. In this section we describe how we create the dataset, how we establish the intermediate representation, and how we find an effective network architecture.

# 3.1. Data Set

Our first problem is to create a depth image dataset with height information. There are already some datasets containing human body information, such as W8-400 [27] and RGB-D-T [29], but in these datasets the human body is located in the center of the image, or poses only simple postures such as standing upright, walking, or facing the camera. Such simple data cannot meet the needs of real-life scenes: when we perform 3D reconstruction of the human body, the subject may be located anywhere in the image and take various postures, and during medical height measurement the patient may be unable to stand. Methods based on these datasets cannot be applied effectively. These problems are the foci and difficulties of the height prediction field, and also the motivation of our research.

Figure 2. Some examples of our dataset. Our dataset consists of RGB images, depth images and the corresponding human height (the panels show RGB/depth pairs for subjects of 180 cm, 175 cm, 177 cm and 159 cm).
We create a human body dataset of 2136 RGB-D images using a Kinect camera [32]; we use only the depth images, while the RGB images may be used by related research in the future. The dataset contains 10 postures, including walking, bending, sitting, etc., see Figure 8. There are 14 volunteers in our dataset. They can stand anywhere and wear arbitrary clothes. Their heights range from $158\mathrm{cm}$ to $184\mathrm{cm}$, which covers a wide range of heights [7]. Figure 2 shows some examples from our dataset.

Next, we need to organize the training data so that the network really establishes a connection between depth information and height, instead of a connection between identity and height. To verify this, we take one man and one woman out of the 14 people and put all of their 369 images into the test set, called the Strange-test, so that the network cannot learn their identity information. For each of the other 12 people, we randomly select 5 images, 60 in total, for the test set, called the Familiar-test.

In summary, our dataset contains 2136 depth images with their corresponding body height values, divided into a training set of 1707 images and a test set of 429 images.
# 3.2. Intermediate Representation

In this section we consider how to establish an intermediate representation of height information from depth images, making it easier and more efficient for the network to estimate the height.

Segmentation of human body parts from depth images as an intermediate representation has been applied successfully in pose estimation [26] [34]. Similarly, in this paper we use a human body-parts segmentation image within our intermediate representation.

Figure 3. The network architecture of $f^1(X)$. The number below each block or layer represents its input size. A depth image $X^D$ and an edge image $E$ are input to output the corresponding torso image $T$.

To make the segmentation better suited to our problem, we observe that the human head, upper body, thigh and calf are nearly rigid, and that the height can be expressed as the sum of these four parts. More importantly, their relative positions reflect the overall posture. Therefore, we segment the human torso into these four nearly rigid parts using the depth image and the torso image. To eliminate the interference of hairstyle, we define the head part as the region between the eyebrows and the neck. Some of our intermediate representation images are shown in Figure 4.

Experiments show that using our intermediate representation significantly increases the accuracy of height estimation, see Section 4.2. It provides three advantages:
1. We decompose the problem of human height estimation into estimating the lengths of four nearly rigid parts, as the length of a rigid object is more predictable.

2. The topological structure and length proportions of these four parts encode the posture information, which provides a powerful clue for height prediction.

3. Convolutional neural networks (CNNs) perform well at local perception [1]. Decomposing the problem into small local problems exploits this strength and reduces complexity, making the final estimation more accurate and stable, see Table 1.
# 3.3. Network Architecture

In this section we describe the network architectures used to obtain the intermediate representation.

In the first step, we need to segment the human torso image from the depth image. FCN [25], UNet++ [36], Mask R-CNN [13] and other methods [5] [31] perform well at pixel-to-pixel image segmentation. We slightly modify their inputs so that they accept depth as input and output the human torso segmentation image. Although these methods can generally segment the torso, they do not perform well at the edges of the human body, especially around the head and the feet.

Figure 4. Some examples of our part-based intermediate representation. The depth image $X^D$ and the torso image $T$ are input into $f^2(X)$ to obtain the label image $L$, our intermediate representation in the form of a segmentation of the human torso into four parts.

We know that in the field of distance prediction, determining the starting point and the ending point is of paramount importance, see Figure 10. We therefore feed the high-frequency information of the depth image to the network to improve the prediction of edges. Denote the original image by $X^{D}$ and the high-frequency image by $E$. We use the Canny operator [3] to extract the edge information:
$$
E = \operatorname{canny}\left(X^{D}\right) \tag{1}
$$
Then we input the depth image $X^D$ and the high-frequency information $E$ into the convolutional neural network $f^{1}(X)$ shown in Figure 3, and define the loss function as:
$$
\mathfrak{L} = \frac{1}{N} \sum_{i \in N} \left\| f_{i}^{1}\left(X^{D}, E\right) - T_{i} \right\|^{2} \tag{2}
$$
Here $T$ is the human torso image, $i$ indexes the pixels, and $N$ is the total number of pixels in the torso image.
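To make Eqs. (1) and (2) concrete, here is a minimal sketch of the edge-enhanced input and the per-pixel MSE loss. The segmentation network `f1` stands in for the FCN-style architecture of Figure 3, and the Canny thresholds and all names are illustrative assumptions rather than the authors' code.

```python
import cv2
import numpy as np
import torch
import torch.nn.functional as F

def make_input(depth_u8: np.ndarray) -> torch.Tensor:
    """Stack a uint8 depth image with its Canny edge map (Eq. 1): shape (1, 2, H, W)."""
    edges = cv2.Canny(depth_u8, 50, 150)                       # E = canny(X^D); thresholds assumed
    x = np.stack([depth_u8, edges]).astype(np.float32) / 255.0
    return torch.from_numpy(x).unsqueeze(0)

def torso_loss(f1: torch.nn.Module, x: torch.Tensor, torso: torch.Tensor) -> torch.Tensor:
    """Per-pixel MSE between predicted and ground-truth torso masks (Eq. 2)."""
    pred = f1(x)                      # same spatial size as `torso`
    return F.mse_loss(pred, torso)    # (1/N) * sum_i ||f1_i(X^D, E) - T_i||^2
```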
We adopt the same convolutional architecture as $f^1(X)$ to design a second network $f^{2}(X)$, which takes the depth image $X^{D}$ and the torso image $T$ as input and outputs the corresponding label image $L$, our intermediate representation. Similarly, we define the loss function of this network as:
$$
\mathfrak{L} = \frac{1}{N} \sum_{i \in N} \left\| f_{i}^{2}\left(X^{D}, T\right) - L_{i} \right\|^{2} \tag{3}
$$
where $i$ indexes the pixels and $N$ is the total number of pixels in the label image $L$.
Figure 5. The network architecture of $f^3(X)$. Based on VGG16, we modify part of the convolutional layers and add three fully connected layers; the network takes a depth image $X^D$ and the label image $L$ and outputs the estimated lengths of the four parts.

After dividing the torso into four parts, we predict these parts separately, obtaining the lengths $H^{head}$, $H^{upperbody}$, $H^{thigh}$ and $H^{calf}$ of the head, upper body, thigh and calf in the image respectively. We design a network architecture $f^3(X)$ similar to VGG16 [35], but modify the convolutional layers and add three fully connected layers at the end of the network to make it better suited to our problem. The network structure is shown in Figure 5. We feed the depth image $X^D$ and the label image $L$ into the network and obtain the estimated lengths of the four parts:
$$
\left[ H^{head}\; H^{upperbody}\; H^{thigh}\; H^{calf} \right]^{1 \times 4} = f^{3}\left(X^{D}, L\right) \tag{4}
$$
In this way we construct our intermediate representation for height prediction: a part-based segmentation image together with the length of each part. The next section discusses how this intermediate representation is used for the final prediction; its effectiveness is verified in Section 4.
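As an illustration of how $f^3(X)$ could be realized, the sketch below modifies torchvision's VGG16 so that it accepts a two-channel input (depth plus label image) and ends in three fully connected layers regressing the four part lengths of Eq. (4). The layer widths and the two-channel stacking are assumptions; the paper does not spell out these details.

```python
import torch
import torch.nn as nn
from torchvision.models import vgg16  # torchvision >= 0.13 API

class PartLengthNet(nn.Module):
    """VGG16 backbone with a 3-layer FC head regressing [H_head, H_upper, H_thigh, H_calf]."""
    def __init__(self):
        super().__init__()
        backbone = vgg16(weights=None)
        # Accept 2 channels (depth X^D + label image L) instead of RGB.
        backbone.features[0] = nn.Conv2d(2, 64, kernel_size=3, padding=1)
        self.features = backbone.features          # 224x224 input -> 512x7x7 features
        self.head = nn.Sequential(                  # assumed layer widths
            nn.Flatten(),
            nn.Linear(512 * 7 * 7, 4096), nn.ReLU(inplace=True),
            nn.Linear(4096, 1024), nn.ReLU(inplace=True),
            nn.Linear(1024, 4),                     # the four part lengths of Eq. (4)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.head(self.features(x))
```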
# 3.4. Developing Network

In this section we discuss how to use our intermediate representation to estimate body height. We design a developing network architecture with a hybrid pooling strategy for this purpose. In the large- and medium-scale convolutional blocks it is still very important to preserve accurate depth information $X^{D}$ and label information $L$, so we add a skip-connection structure [14] that directly appends this information to the output of the previous convolutional layer as the input of the next convolutional layer. Initially we let the depth information $X^{D}$ and the label information $L$ share the same pooling strategy. However, we find that even with the skip connections, the network accuracy does not increase significantly. We then observe that the depth image $X^{D}$ contains a lot of noise, and max pooling is easily disturbed by it, so we switch the depth branch to average pooling, which brings a certain smoothness and alleviates interference from local extreme pixels. For the label image, by contrast, max pooling turns out to be more suitable. We therefore propose a new architecture based on VGG16 [35] with skip connections and a hybrid pooling strategy; the structure of $f^{4}(X)$ is shown in Figure 6.
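A minimal sketch of one block of this hybrid pooling design, under our reading of the text: average pooling on the noisy depth branch, max pooling on the label branch, and skip connections that re-inject the resolution-matched inputs into every block. The block structure and channel counts are illustrative, not the authors' exact architecture.

```python
import torch
import torch.nn as nn

class HybridBlock(nn.Module):
    """One sketch block of the f^4 network: conv features plus skip-connected
    depth (avg-pooled) and label (max-pooled) inputs."""
    def __init__(self, in_ch: int, out_ch: int):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch + 2, out_ch, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1), nn.ReLU(inplace=True),
        )
        self.pool_depth = nn.AvgPool2d(2)   # smooths sensor noise in X^D
        self.pool_label = nn.MaxPool2d(2)   # keeps crisp part boundaries in L
        self.pool_feat = nn.MaxPool2d(2)

    def forward(self, feat, depth, label):
        # Skip connection: concatenate the resolution-matched raw inputs.
        x = torch.cat([feat, depth, label], dim=1)
        feat = self.pool_feat(self.conv(x))
        return feat, self.pool_depth(depth), self.pool_label(label)
```

Each call halves the resolution of all three streams, so every scale of the network sees the raw depth and label information, which is the stated purpose of the skip connections.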
Figure 6. The network architecture of $f^4(X)$. We adopt different pooling strategies for the depth image $X^{D}$ and the label image $L$ to estimate human height.

Figure 7. The change of our developing network structure over different iteration intervals. The blue layers are already active; the yellow layers are newly added as the number of iterations grows.

Figure 8. Some estimation results. The first and second rows show a female and a male volunteer from the Strange-test respectively. The third row shows volunteers from the Familiar-test. All subjects can be located anywhere and pose in various postures. In every case, our method makes an accurate estimate from a single depth image alone.

During the training process the network is prone to overfitting, so we propose an increasingly complex network structure, the developing network, to avoid it, as shown in Figure 7. We first pre-train with all convolutional layers until the number of iterations exceeds $N_{1}$. The model is then saved, and only the first layer of each block is kept. When the iteration count exceeds $N_{2}$, the second layer of the first and second blocks is added to training. Similarly, a new layer of the third, fourth and fifth blocks is added when the iteration count exceeds $N_{3}$ and $N_{4}$. Training then continues until the iteration count reaches $N_{5}$.
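A hedged sketch of this staged schedule is given below, using the milestone values reported in Section 4.3. The `reduce_to_first_layers` and `grow` methods are hypothetical stand-ins for whatever mechanism removes and adds layers, which the paper does not specify.

```python
# Staged "developing network" training sketch (milestones from Section 4.3).
N1, N2, N3, N4, N5 = 40_000, 60_000, 80_000, 100_000, 160_000

def train_developing(model, optimizer, loss_fn, batches):
    for it, (x, y) in enumerate(batches):
        if it == N1:
            model.reduce_to_first_layers()   # hypothetical: keep one conv layer per block
        elif it == N2:
            model.grow(blocks=(1, 2))        # hypothetical: restore 2nd layer of blocks 1-2
        elif it == N3:
            model.grow(blocks=(3, 4, 5))     # hypothetical: restore a layer of blocks 3-5
        elif it == N4:
            model.grow(blocks=(3, 4, 5))     # hypothetical: restore the next layer
        elif it == N5:
            break                            # training ends at N5
        optimizer.zero_grad()
        loss_fn(model(x), y).backward()
        optimizer.step()
```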
We train the four subnetworks $f^1(X)$, $f^2(X)$, $f^3(X)$ and $f^4(X)$ one at a time, see Figure 1. The proposed architecture effectively exploits our intermediate representation to predict the height of the human body, and performs excellently at preventing over-fitting, see Figure 9 and Table 3.
# 4. Experiment

We conduct a series of experiments to validate our method, covering the use of the intermediate representation, the adoption of our developing network, and different input types. We also compare the accuracy against other methods. The experiments show that using the intermediate representation together with our network architecture yields the highest accuracy. Some results of our method are shown in Figure 8.

# 4.1. Data Preparation and Parameter Settings

We resize the 2136 depth images to $256\times256$ and $224\times224$. The networks $f^{1}(X)$ and $f^{2}(X)$ use images of size $256\times256$; the networks $f^{3}(X)$ and $f^{4}(X)$ use $224\times224$. We train on the training set of 1707 images and test on the test set of 429 images.
The depths of the networks $f^1(X)$ and $f^2(X)$ are 21 layers, and those of the other two are 19. The initial learning rates of $f^1(X)$ and $f^2(X)$ are set to 0.0001, decayed by a factor of 0.8 every 5 epochs. The other two are also set to 0.0001, decayed by a factor of 0.5 every 50 epochs. The batch size is set to 8.
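These step-decay schedules map directly onto PyTorch's standard `StepLR` scheduler. A minimal sketch follows; the choice of optimizer and the placeholder models are assumptions, since the paper states only the learning rates and decay factors.

```python
import torch

seg_model = torch.nn.Conv2d(2, 1, 3, padding=1)   # placeholder for f1/f2
reg_model = torch.nn.Linear(10, 4)                # placeholder for f3/f4

# Schedules from Section 4.1; the use of Adam is an assumption.
opt_seg = torch.optim.Adam(seg_model.parameters(), lr=1e-4)
opt_reg = torch.optim.Adam(reg_model.parameters(), lr=1e-4)
sched_seg = torch.optim.lr_scheduler.StepLR(opt_seg, step_size=5, gamma=0.8)   # x0.8 / 5 epochs
sched_reg = torch.optim.lr_scheduler.StepLR(opt_reg, step_size=50, gamma=0.5)  # x0.5 / 50 epochs

# Call sched_seg.step() / sched_reg.step() once per epoch after the optimizer steps.
```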
We use the average relative error as the evaluation metric, defined as:
$$
\mathrm{AverageRelativeError} = \frac{1}{n} \sum_{n} \left| H_{e} - H_{a} \right| / H_{a} \tag{5}
$$
where $n$ is the number of samples in the test set, $H_{e}$ is the estimated height, and $H_{a}$ is the real height.
Correspondingly, we define the accuracy as:
$$
\mathrm{Accuracy} = 1 - \mathrm{AverageRelativeError} \tag{6}
$$
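A small sketch of Eqs. (5) and (6) as they would be applied to a test set (the array names are illustrative):

```python
import numpy as np

def accuracy(h_est: np.ndarray, h_true: np.ndarray) -> float:
    """Eqs. (5)-(6): 1 minus the mean relative height error over the test set."""
    are = np.mean(np.abs(h_est - h_true) / h_true)
    return 1.0 - are

# e.g. accuracy(np.array([178.5]), np.array([180.0])) ~= 0.9917
```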
# 4.2. Validity of the Intermediate Representation

In this section we verify the validity of the intermediate representation proposed in Section 3.2. We conduct three sets of experiments: our method, the method without the intermediate representation (denoted M1), and the method with a partial intermediate representation (denoted M2).

In our method, we follow exactly the steps of Section 3.2 to obtain the intermediate representation. In M1, we only use $f^1(X)$ to segment the image, then feed the resulting torso image $T$ and the depth image $X^D$ into $f^4(X)$ to estimate the height. In M2, we feed the label image $L$, obtained by applying $f^1(X)$ and $f^2(X)$ sequentially, together with the depth image $X^D$ into $f^4(X)$. Table 1 shows the relative errors of the three methods.

<table><tr><td>Method Name</td><td>Error</td></tr><tr><td>Ours</td><td>0.90%</td></tr><tr><td>M1</td><td>1.71%</td></tr><tr><td>M2</td><td>1.20%</td></tr></table>

Table 1. Average Relative Error of Our Method, M1 and M2.
It can be seen from Table 1 that our error is $43\%$ less than that of M1, and $19\%$ less than that of M2. We also count the number of images falling into different error intervals for the three methods, as shown in Table 2.

<table><tr><td>Error Interval</td><td>Ours</td><td>M1</td><td>M2</td></tr><tr><td>0<Error≤1%</td><td>273</td><td>172</td><td>228</td></tr><tr><td>1%<Error≤2%</td><td>101</td><td>113</td><td>128</td></tr><tr><td>2%<Error≤3%</td><td>35</td><td>65</td><td>45</td></tr><tr><td>3%<Error≤4%</td><td>17</td><td>45</td><td>16</td></tr><tr><td>4%<Error≤5%</td><td>2</td><td>21</td><td>3</td></tr><tr><td>5%<Error≤6%</td><td>1</td><td>7</td><td>6</td></tr><tr><td>6%<Error≤7%</td><td>0</td><td>3</td><td>1</td></tr><tr><td>7%<Error≤8%</td><td>0</td><td>0</td><td>1</td></tr><tr><td>8%<Error≤9%</td><td>0</td><td>2</td><td>1</td></tr><tr><td>9%<Error≤10%</td><td>0</td><td>1</td><td>0</td></tr></table>

Table 2. Number of Samples within Different Error Intervals for Our Method, M1 and M2.

It is clear from Table 2 that our method performs excellently at reducing extreme errors: $99.3\%$ of all results have an error below $4\%$, and $87.2\%$ of them below $2\%$.

This demonstrates that our intermediate representation significantly improves accuracy and reduces extremely incorrect estimates.

# 4.3. Effectiveness of the Network Architecture

To verify the validity of the developing network architecture proposed in Section 3.4, we conduct two groups of contrast tests (called M3 and M4). In M3, rather than using the increasingly complex architecture, we train with all convolutional layers from the start. In M4, we do not pre-train the network, but grow the architecture gradually from the simplest configuration. Table 3 shows the relative errors of the three methods.
<table><tr><td>Method Name</td><td>Error</td></tr><tr><td>Ours</td><td>0.90%</td></tr><tr><td>M3</td><td>0.97%</td></tr><tr><td>M4</td><td>1.07%</td></tr></table>

Table 3. Average Relative Error of Our Method, M3 and M4.

As can be seen from Table 3, our architecture outperforms M3 and M4, with $7.22\%$ and $15.89\%$ lower error respectively.

We plot the accuracy curves during training in Figure 9. In our implementation we set $N_1 = 40000$, $N_2 = 60000$, $N_3 = 80000$, $N_4 = 100000$ and $N_5 = 160000$. Before $N_1$, M3 adopts the same training schedule as ours, and the two methods reach similar accuracy. After $N_1$, although the accuracy of our method drops at first, it gradually increases as the network structure is restored, and the final accuracy even exceeds that of M3 and M4, which indicates that our architecture is effective.

Figure 9. The accuracy curves over the number of iterations for the four methods. To make the plot clearer, the data within the range [98, 99.2] is stretched by a factor of 10.

# 4.4. Comparison of Different Input Types

In this section we determine which input type is best for our height estimation network. We conduct three sets of experiments using different inputs: RGB, RGB-D and depth only. For the RGB and RGB-D types, we only change the depth image in the inputs of the networks $f^{1}(X)$, $f^{2}(X)$, $f^{3}(X)$ and $f^{4}(X)$ to the corresponding image. The relative errors of the three input types are listed in Table 4.
<table><tr><td>Input Type</td><td>Test Set</td><td>Strange-test</td><td>Familiar-test</td></tr><tr><td>RGB</td><td>1.35%</td><td>1.47%</td><td>0.64%</td></tr><tr><td>RGB-D</td><td>1.05%</td><td>1.13%</td><td>0.56%</td></tr><tr><td>Depth (Ours)</td><td>0.90%</td><td>0.95%</td><td>0.60%</td></tr></table>

Table 4. Average Relative Error of the Three Input Types on the Full Test Set, Strange-test and Familiar-test.

Intuitively, RGB-D images contain more information than the other two types, so one might expect RGB-D input to yield the best results. However, we find that using depth images alone produces the best result. We suspect the reason is that RGB data in the input leads the network to establish a connection between identity and height, rather than estimating height from the image itself.

To verify this conclusion, we conduct experiments on the two test sets proposed in Section 3.1: the Strange-test and the Familiar-test. The average relative error of each input type on these two test sets is also shown in Table 4.

From Table 4 we find that the relative error of the RGB input on the Strange-test is more than twice that on the Familiar-test. This confirms our previous conclusion that using RGB images lets the network learn a connection between identity and height.

<table><tr><td>Posture Name</td><td>Ours</td><td>Deák et al. [10]</td><td>Criminisi et al. [8]</td><td>Camera Calibration</td></tr><tr><td>Upright</td><td>0.59%</td><td>4.10%</td><td>1.63%</td><td>1.35%</td></tr><tr><td>Walking</td><td>0.97%</td><td>2.95%</td><td>7.41%</td><td>2.79%</td></tr><tr><td>Sitting</td><td>1.00%</td><td>3.58%</td><td>4.82%</td><td>25.50%</td></tr><tr><td>Bending</td><td>2.19%</td><td>9.92%</td><td>5.38%</td><td>28.53%</td></tr><tr><td>Arms Raising Slightly</td><td>0.78%</td><td>4.38%</td><td>1.45%</td><td>1.20%</td></tr><tr><td>Unrolling Arms</td><td>0.77%</td><td>3.93%</td><td>1.52%</td><td>1.51%</td></tr><tr><td>Arms over Head</td><td>0.91%</td><td>4.86%</td><td>1.52%</td><td>1.48%</td></tr><tr><td>Waving Hands</td><td>0.75%</td><td>4.45%</td><td>1.54%</td><td>1.53%</td></tr><tr><td>Clapping</td><td>0.69%</td><td>5.50%</td><td>1.58%</td><td>2.91%</td></tr><tr><td>Having a Waist Line</td><td>0.73%</td><td>4.39%</td><td>1.59%</td><td>2.97%</td></tr><tr><td>Total Average Error</td><td>0.90%</td><td>4.80%</td><td>6.44%</td><td>2.69%</td></tr></table>

Table 5. The Average Relative Error of Our Method and Other Methods for Different Postures.
# 4.5. Comparison to Other Methods

This section compares our method with other methods on our test set: Deák et al. [10], based on proportional relationships between body parts; Criminisi et al. [8], based on vanishing points and vanishing lines; and the Kinect camera calibration method [32]. Table 5 shows the comparison results.

The comparison shows that the average error of our method on the whole test set is significantly lower than that of the other methods. Our average relative error is approximately $0.90\%$, which allows the human body height to be extracted accurately from the image. Since the methods of Criminisi et al. [8] and camera calibration cannot cope well with non-upright postures, Table 5 also breaks down the error of the four methods by posture. Ours is the best of the four methods in every posture.

We also analyze the errors of the other methods. Deák et al. [10] rely only on the ratio between the pixel length of the head and the pupillary distance, so the method is not very sensitive to posture. However, it requires the face to be as perpendicular as possible to the camera's optical axis, otherwise the ratio of the interpupillary distance to the head length cannot be extracted correctly. Besides, due to individual differences, recovering a person's height through such statistical rules is not convincingly accurate. For the methods of Criminisi et al. [8] and camera calibration, when a person walks, sits or bends, the straight-line distance between the top of the head and the bottom of the feet is a poor representation of the person's height.

Although these two methods perform better for the upright pose than for the aforementioned postures, they suffer from an inherent problem of predicting body height from a single-view image. As shown in Figure 10, the distance $h_2$ between the dashed lines is the real height of the human body, while $h_1$ is the height predicted from the image. Consequently, an error of about $1.5\%$ remains in the upright posture for both methods.

Figure 10. A failure case of estimating height from a single-view image in [10] and [8].
# 5. Conclusion and Future Works

We present a method for accurately and quickly estimating body height from a single depth image, based on an increasingly complex network architecture. We propose an intermediate representation based on an effective torso segmentation, obtained automatically by adding high-frequency information of the depth image to an FCN. We first predict the length of each body part separately and finally construct a developing network for the final estimation, which effectively suppresses over-fitting. Our method copes with sitting, bending, walking and many other postures, and the accuracy reaches $99.1\%$.

In the future we may enrich the current dataset with more subjects and refine the human body segmentation based on semantic bio-information, which we believe will further improve the accuracy. As a further direction, we would like to explore more non-contact measurement techniques for geometric and physical quantities such as weight and density, using optimized deep learning with various inputs.

# 6. Acknowledgements

This work was supported by the Science Foundation of Hunan Province (No. 2018JJ3064) and the National Science Foundation of China (No. 61303147). We gratefully acknowledge NVIDIA for the GPU donation. We thank Dan Yin, Wei Cai and Zeyu Liu for their help with dataset preparation.
# References

[1] Angelos Amanatiadis, Vasileios G Kaburlasos, and Elias B Kosmatopoulos. Understanding deep convolutional networks through gestalt theory. In 2018 IEEE International Conference on Imaging Systems and Techniques (IST), pages 1-6. IEEE, 2018. 4

[2] Chiraz BenAbdelkader and Yaser Yacoob. Statistical body height estimation from a single image. In 2008 8th IEEE International Conference on Automatic Face & Gesture Recognition, pages 1-7. IEEE, 2008. 2

[3] John Canny. A computational approach to edge detection. IEEE Transactions on Pattern Analysis and Machine Intelligence, (6):679-698, 1986. 4

[4] Yu Chai and Xiaojing Cao. A real-time human height measurement algorithm based on monocular vision. In 2018 2nd IEEE Advanced Information Management, Communicates, Electronic and Automation Control Conference (IMCEC), pages 293-297. IEEE, 2018. 3

[5] Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, and Hartwig Adam. Encoder-decoder with atrous separable convolution for semantic image segmentation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 801-818, 2018. 4

[6] Michael Cogswell, Faruk Ahmed, Ross Girshick, Larry Zitnick, and Dhruv Batra. Reducing overfitting in deep networks by decorrelating representations. arXiv preprint arXiv:1511.06068, 2015. 1

[7] NCD Risk Factor Collaboration et al. A century of trends in adult human height. *Elife*, 5:e13410, 2016. 3

[8] Antonio Criminisi, Ian Reid, and Andrew Zisserman. Single view metrology. International Journal of Computer Vision, 40(2):123-148, 2000. 2, 3, 8

[9] Angela Dai, Matthias Nießner, Michael Zollhöfer, Shahram Izadi, and Christian Theobalt. BundleFusion: Real-time globally consistent 3d reconstruction using on-the-fly surface reintegration. ACM Transactions on Graphics (ToG), 36(3):24, 2017. 2

[10] A Deák, O Kainz, M Michalko, and F Jakab. Estimation of human body height from uncalibrated image. In 2017 15th International Conference on Emerging eLearning Technologies and Applications (ICETA), pages 1-4. IEEE, 2017. 2, 8

[11] Yanping Fu, Qingan Yan, Long Yang, Jie Liao, and Chunxia Xiao. Texture mapping for 3d reconstruction with rgb-d sensor. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4645-4653, 2018. 2

[12] Ye-Peng Guan. Unsupervised human height estimation from a single image. Journal of Biomedical Science and Engineering, 2(06):425, 2009. 2

[13] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask R-CNN. In Proceedings of the IEEE International Conference on Computer Vision, pages 2961-2969, 2017. 4

[14] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 770-778, 2016. 2, 5

[15] Yihui He, Xiangyu Zhang, and Jian Sun. Channel pruning for accelerating very deep neural networks. In Proceedings of the IEEE International Conference on Computer Vision, pages 1389-1397, 2017. 1

[16] Patrick Chi-Yuen Hung, Channa P Witana, and Ravindra S Goonetilleke. Anthropometric measurements from photographic images. Computing Systems, 29:764-769, 2004. 3

[17] Erno Jeges, Istvan Kispal, and Zoltan Hornak. Measuring human height using calibrated cameras. In 2008 Conference on Human System Interactions, pages 755-760. IEEE, 2008. 3

[18] István Kispál and Ernő Jeges. Human height estimation using a calibrated camera. In Proc. CVPR, 2008. 3

[19] Kual-Zheng Lee. A simple calibration approach to single view height estimation. In 2012 Ninth Conference on Computer and Robot Vision, pages 161-166. IEEE, 2012. 2

[20] Hao Li, Asim Kadav, Igor Durdanovic, Hanan Samet, and Hans Peter Graf. Pruning filters for efficient convnets. arXiv preprint arXiv:1608.08710, 2016. 1

[21] Jie Li, Mingui Sun, Hsin-Chen Chen, Zhaoxin Li, and Wenyan Jia. Anthropometric measurements from multi-view images. In 2012 38th Annual Northeast Bioengineering Conference (NEBEC), pages 426-427. IEEE, 2012. 2

[22] Shengzhe Li, Van Huan Nguyen, Mingjie Ma, Cheng-Bin Jin, Trung Dung Do, and Hakil Kim. A simplified nonlinear regression method for human height estimation in video surveillance. EURASIP Journal on Image and Video Processing, 2015(1):32, 2015. 3

[23] Zhaoxin Li, Wenyan Jia, Zhi-Hong Mao, Jie Li, Hsin-Chen Chen, Wangmeng Zuo, Kuanquan Wang, and Mingui Sun. Anthropometric body measurements based on multi-view stereo image reconstruction. In 2013 35th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC), pages 366-369. IEEE, 2013. 3

[24] Yingying Liu, Arcot Sowmya, and Heba Khamis. Single camera multi-view anthropometric measurement of human height and mid-upper arm circumference using linear regression. *PloS one*, 13(4):e0195600, 2018. 2

[25] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3431-3440, 2015. 1, 4

[26] Natalia Neverova, Christian Wolf, Florian Nebout, and Graham W Taylor. Hand pose estimation through weakly-supervised learning of a rich intermediate representation. arXiv preprint arXiv:1511.06728, 2015. 4

[27] Tam V Nguyen, Jiashi Feng, and Shuicheng Yan. Seeing human weight from a single rgb-d image. Journal of Computer Science and Technology, 29(5):777-784, 2014. 3

[28] Bas Penders, Ralph Brecheisen, Angele Gerver, Geertjan van Zonneveld, and Willem-Jan Gerver. Validating paediatric morphometrics: body proportion measurement using photogrammetric anthropometry. Journal of Pediatric Endocrinology and Metabolism, 28(11-12):1357-1362, 2015. 2

[29] Christian Pfitzner, Stefan May, and Andreas Nüchter. Body weight estimation for dose-finding and health monitoring of lying, standing and walking patients based on rgb-d data. Sensors, 18(5):1311, 2018. 3

[30] Aaditya Prakash, James Storer, Dinei Florencio, and Cha Zhang. RePr: Improved training of convolutional filters. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 10666-10675, 2019. 1

[31] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 234-241. Springer, 2015. 4

[32] Microsoft Kinect sensors for Windows SDK. Available online: https://docs.microsoft.com/en-us/previousversions/windows/kinect. 1, 3, 8

[33] Jie Shao, Shaohua Kevin Zhou, and Rama Chellappa. Robust height estimation of moving objects from uncalibrated videos. IEEE Transactions on Image Processing, 19(8):2221-2232, 2010. 3

[34] Jamie Shotton, Andrew Fitzgibbon, Mat Cook, Toby Sharp, Mark Finocchio, Richard Moore, Alex Kipman, and Andrew Blake. Real-time human pose recognition in parts from single depth images. In CVPR 2011, pages 1297-1304. IEEE, 2011. 4

[35] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 1, 5

[36] Zongwei Zhou, Md Mahfuzur Rahman Siddiquee, Nima Tajbakhsh, and Jianming Liang. UNet++: A nested u-net architecture for medical image segmentation. In Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support, pages 3-11. Springer, 2018. 4
accurateestimationofbodyheightfromasingledepthimageviaafourstagedevelopingnetwork/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a589099ee8d216aa5447bd2501de6def211f61eb3a01256227a5793aa4bdd2f1
size 526765
accurateestimationofbodyheightfromasingledepthimageviaafourstagedevelopingnetwork/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:05cd355d8d64deed3db901b97c66f119033398552389d13cb06f02099572c548
size 390858
achievingrobustnessinthewildviaadversarialmixingwithdisentangledrepresentations/31e82398-e561-4b4c-a670-94e39e04ec8f_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:58991e783cf560d4d3d59c64ac908982ea944308e0b8f0aad7a62705e9e46737
size 91635
achievingrobustnessinthewildviaadversarialmixingwithdisentangledrepresentations/31e82398-e561-4b4c-a670-94e39e04ec8f_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4ea80cd120f0547e11c98d5316622949b334250ef16298320ae942944ed1f2c6
size 120040
achievingrobustnessinthewildviaadversarialmixingwithdisentangledrepresentations/31e82398-e561-4b4c-a670-94e39e04ec8f_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:caf6abede42cc3dbd484939da6b389d31afb203f6c44a81f75c4a27d5c0beb4d
size 1293223
achievingrobustnessinthewildviaadversarialmixingwithdisentangledrepresentations/full.md
ADDED
@@ -0,0 +1,435 @@
# Achieving Robustness in the Wild via Adversarial Mixing with Disentangled Representations

Sven Gowal* (sgowal@google.com), Chongli Qin* (chongliqin@google.com), Po-Sen Huang (posenhuang@google.com), Taylan Cemgil (taylancemgil@google.com), Krishnamurthy (Dj) Dvijotham (dvij@google.com), Timothy Mann (timothymann@google.com), Pushmeet Kohli (pushmeet@google.com)

DeepMind

# Abstract

Recent research has made the surprising finding that state-of-the-art deep learning models sometimes fail to generalize to small variations of the input. Adversarial training has been shown to be an effective approach to overcome this problem. However, its application has been limited to enforcing invariance to analytically defined transformations like $\ell_p$-norm bounded perturbations. Such perturbations do not necessarily cover plausible real-world variations that preserve the semantics of the input (such as a change in lighting conditions). In this paper, we propose a novel approach to express and formalize robustness to these kinds of real-world transformations of the input. The two key ideas underlying our formulation are (1) leveraging disentangled representations of the input to define different factors of variations, and (2) generating new input images by adversarially composing the representations of different images. We use a StyleGAN model to demonstrate the efficacy of this framework. Specifically, we leverage the disentangled latent representations computed by a StyleGAN model to generate perturbations of an image that are similar to real-world variations (like adding make-up, or changing the skin-tone of a person) and train models to be invariant to these perturbations. Extensive experiments show that our method improves generalization and reduces the effect of spurious correlations (reducing the error rate of a "smile" detector by $21\%$ for example).
# 1. Introduction

The principle by which neural networks are trained to minimize their average error on the training data is known as Empirical Risk Minimization (ERM) [1]. ERM has, for the most part, enabled breakthroughs in a wide variety of fields [2-4], and this success has led to the usage of neural networks in applications that are safety-critical [5]. ERM, however, is only guaranteed to produce meaningful models when the data encountered during training and deployment is drawn independently from the same distribution. When a mismatch between training and testing data occurs, models can fail in catastrophic ways; and, unfortunately, such an occurrence is commonplace: training data is often collected through a biased process that highlights confounding factors and spurious correlations [6, 7], which can lead to undesirable consequences (e.g., http://gendershades.org).

Figure 1. Variations of the same faces. A model obtained through classical training classifies the same face as both "smiling" and "not smiling" depending on the variation, with panel labels reporting predictions and confidences such as "Not smiling (99.36%)" versus "Smiling (99.98%)". Our model remains consistent in terms of classification. Note that these persons "do not exist" and have been generated using a StyleGAN model.
The effects of such data shifts are detailed extensively in the literature. For example, both Recht et al. [8] and Hendrycks et al. [9] show that the accuracy of IMAGENET models is severely impacted by changes in the data collection process. Methods to counteract this effect, which mainly consist of data augmentation techniques, also struggle: training against corrupted data only forces the memorization of those corruptions, and as a result such models fail to generalize to new corruptions [10, 11]. Works such as mixup [12] or AutoAugment [13] pave the way to further improvements, but still require intricate fine-tuning to succeed in practice.

Another parallel and important line of work uncovered that the addition of small but carefully chosen deviations to the input, called adversarial perturbations, can cause the neural network to make incorrect predictions with high confidence [14-18]. Techniques to build models that are robust to adversarially perturbed examples, such as adversarial training [19], have received a significant amount of attention in recent years [16, 20-22]. The existence of imperceptible perturbations that alter a model's output demonstrates that supervised learning algorithms still fail to capture the true causal relationships between signal and label. The degradation of performance when shifting between training and adversarial (or otherwise corrupted) distributions indicates that neural networks pick up on correlations that are not necessarily robust to small input perturbations [23]. Imperceptible adversarial perturbations highlight just one form of spurious correlation that causes undesirable behavior in the networks we train.

This paper focuses on training models that are robust to plausible real-world perturbations that preserve semantic content (such as those presented in Figure 1). We go beyond conventional data augmentation and adversarial training on $\ell_{p}$-norm bounded perturbations by leveraging high-quality generative models that can describe such perturbations. In particular, we address the question: "Given a generative model with a sufficiently good disentangled representation that aligns well with the perturbations of interest, can we train neural networks that are resistant to bias and spurious correlations present in the training data?" More specifically, we consider StyleGAN [24] as our underlying generative model. Our contributions are as follows:
|
| 70 |
+
|
| 71 |
+
1. We develop a framework dubbed Adversarial Mixing with Disentangled Representations (AdvMix) which leverages the disentangled latents of a generative model to train networks that are robust to real-world variations.
|
| 72 |
+
2. We demonstrate how to leverage StyleGAN's mixing property to systematically transfer image attributes likely to be misclassified across image instances, thus allowing us to generate realistic worst-case semantic variations. This enables us to define semantic perturbations in a purely data-driven fashion, as opposed to methods that require data collection under different conditions [25].
|
| 73 |
+
3. We conduct extensive experiments on a controlled Color-MNIST dataset that compare Adversarial Mixing with Disentangled Representations with random data augmentation and demonstrate under which conditions AdvMix achieves higher accuracy.
|
| 74 |
+
4. Finally, we demonstrate empirically on CELEBA that accuracy is not necessarily at odds with robustness [26], once we consider semantic variations other than $\ell_p$ -norm bounded variations.
|
| 75 |
+
|
| 76 |
+

|
| 77 |
+
Figure 2. Comparison of different data augmentation techniques. These transformations tend to destroy the image semantics.
|
| 78 |
+
|
| 79 |
+

|
| 80 |
+
|
| 81 |
+

|
| 82 |
+
|
| 83 |
+
# 2. Related work
|
| 84 |
+
|
| 85 |
+
Robustness to $\ell_{p}$ -norm perturbations. Generating pixel-level adversarial perturbations has been and remains extensively studied [16, 18-20, 27, 28]. Most works focus the robustness of classifiers under $\ell_{p}$ -norm bounded perturbations. In particular, it is expected that a robust classifier be invariant to small perturbations in the pixel space (as defined by the $\ell_{p}$ -norm). Goodfellow et al. [16] and Madry et al. [19] laid down foundational principles to train robust networks, and recent works [29, 30] continue to find novel approaches to enhance robustness. While existing work is able to train models that are robust to imperceptible pixel-level variations, the study of robustness against semantically meaningful perturbations is largely under-explored.
|
| 86 |
+
|
| 87 |
+
Adversarial robustness beyond $\ell_p$ -norm. Engstrom et al. [31] and Kanbak et al. [32] explored geometric transformations such as rotations and translation of images. Early works (e.g., Baluja and Fischer [33]) also demonstrated that it is possible to go beyond analytically defined variations by using generative models to create perturbations. Song et al. [34] and Xiao et al. [35] used a pre-trained AC-GAN [36] to generate perturbations; and they demonstrated that it is possible to generate semantically relevant perturbations for tasks such as MNIST, SVHN and CELEBA. Lastly, Qiu et al. [37] have attempted to generate adversarial examples by interpolating through the attribute space defined by a generative model. With the exception of [38], in which the authors strongly limit semantic variations by keeping the perturbed image close to its original counterpart, there has been little to no work demonstrating robustness to large semantically plausible variations. As such the effect of training models robust to such variations is unclear. To the best of our knowledge, this paper is the first to analyze the difference between adversarial training and data augmentation in the space of semantically meaningful variations.
|
| 88 |
+
|
| 89 |
+
Data augmentation Data augmentation can reduce generalization error. For image classification tasks, random flips, rotations and crops are commonly used [39]. More sophisticated techniques such as Cutout [40] (which produces random occlusions), CutMix [41] (which replaces parts of
|
| 90 |
+
|
| 91 |
+
an image with another) and mixup [12] (which linearly interpolates between two images) all demonstrate extremely compelling and surprising results. Indeed, while these methods often result in images that are visibly corrupted and void of semantic meaning (even to the human eye), the resulting models often achieve state-of-the-art accuracy across a wide range of datasets. Figure 2 shows a comparison of these different techniques. Some of these data augmentation techniques have been applied to latent representations of the input (rather than the input itself) [42]. However, these do not focus on the effect of data bias.
|
| 92 |
+
|
| 93 |
+
Causal reasoning using additional data. Heinze-Deml and Meinshausen [43] use grouped observations (e.g., the same object under different conditions) to discover variations that should not explain the classification label. More recently Arjovsky et al. [25] developed a method called Invariant Risk Minimization (IRM) which tries to find an invariant predictor across different environments (or groups of data points). Both methods were able to build classifiers that were less sensitive to spurious correlations, which, in turn, lead to classifiers that were less biased than classifiers trained purely on an original biased training set. However, they require explicitly annotated data collected under different environmental conditions.
|
| 94 |
+
|
| 95 |
+
# 3. Adversarial Mixing with Disentangled Representations
|
| 96 |
+
|
| 97 |
+
In this paper, we consider a model $f_{\theta}$ parametrized by $\theta$ . We would like our model to be robust or invariant to a set of transformations $\mathcal{T}$ . Formally, our goal is to find the model parameters $\theta$ that minimize the semantic adversarial risk
|
| 98 |
+
|
| 99 |
+
$$
|
| 100 |
+
\underset {(x, y) \sim \mathcal {D}} {\mathbb {E}} \left[ \max _ {t \in \mathcal {T}} L \left(f _ {\theta} (t (x)), y\right) \right], \tag {1}
|
| 101 |
+
$$
|
| 102 |
+
|
| 103 |
+
where $\mathcal{D} \subset \mathcal{X} \times \mathcal{Y}$ is a data distribution over pairs of examples $x$ and corresponding labels $y$ , and $L$ is a suitable loss function (such as the $0 - 1$ loss in the context of classification tasks). The set of semantic transformations $\mathcal{T}$ contains functions of the form $t: \mathcal{X} \to \mathcal{X}$ . Each element $t \in \mathcal{T}$ is irreducible and, crucially, for the optimal classifier $f_{\theta}: \mathcal{X} \to \mathcal{Y}$ , we would like that $f_{\theta}(t(x)) = f_{\theta}(x)$ for all $t \in \mathcal{T}$ . For example, an MNIST classifier should not be affected by changes in the digit color. In the following, we define a set of transformations $\mathcal{T}$ via a decoder that leverages a disentangled latent representation and explain how to evaluate the resulting risk in Equation (1).
|
| 104 |
+
|
| 105 |
+
Invariant latent factors. Disentanglement is perceived as a desirable property of representations. Often, one hopes to obtain a representation of the observed data $x \in \mathcal{X}$ in terms of separate and conditionally independent factors $z \in$
|
| 106 |
+
|
| 107 |
+
$\mathcal{Z}$ given $x$ under a certain class of input transformations [44]. In our particular setting, we will assume a task-specific disentangled representation. Formally, we assume that we have an ideal generator (or decoder), $\operatorname{dec} : \mathcal{Z} \to \mathcal{X}$ , where the latent space $\mathcal{Z}$ is a product space of the form $\mathcal{Z} = \mathcal{Z}_{\parallel} \times \mathcal{Z}_{\perp}$ . For a given classification task that predicts the label $y$ , only the coordinates corresponding to $\mathcal{Z}_{\parallel}$ are relevant, while $\mathcal{Z}_{\perp}$ is irrelevant. We formalize the above notions using conditional independence: given an example $x = \operatorname{dec}(z_{\parallel}, z_{\perp})$ with $z_{\perp} \in \mathcal{Z}_{\perp}$ , $z_{\parallel} \in \mathcal{Z}_{\parallel}$ and corresponding label $y \in \mathcal{V}$ , we have
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
\mathbb {P} (y | z _ {\parallel}, z _ {\perp}) = \mathbb {P} (y | z _ {\parallel}). \tag {2}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
Hence, the ideal invariant classifier $f^{\star}$ that outputs a probability distribution over $\mathcal{V}$ should be consistent with the invariance assumption
|
| 114 |
+
|
| 115 |
+
$$
|
| 116 |
+
f ^ {\star} \left(\operatorname {d e c} \left(z _ {\parallel}, z _ {\perp}\right)\right) = f ^ {\star} \left(\operatorname {d e c} \left(z _ {\parallel}, \tilde {z} _ {\perp}\right)\right) \tag {3}
|
| 117 |
+
$$
|
| 118 |
+
|
| 119 |
+
for all $\tilde{z}_{\perp}\in \mathcal{Z}_{\perp}$ , and should output the correct label:
|
| 120 |
+
|
| 121 |
+
$$
|
| 122 |
+
\underset {y ^ {\prime} \in \mathcal {Y}} {\operatorname {a r g m a x}} f ^ {\star} (\operatorname {d e c} (z _ {\parallel}, z _ {\perp})) = y. \tag {4}
|
| 123 |
+
$$
|
| 124 |
+
|
| 125 |
+
Finally, referring back to Equation (1), we define the set of transforms $\mathcal{T}$ that induce semantically irrelevant perturbations as:
|
| 126 |
+
|
| 127 |
+
$$
|
| 128 |
+
\begin{array}{l} \mathcal {T} = \{t \mid t (x) = \operatorname {d e c} \left(z _ {\parallel}, \tilde {z} _ {\perp}\right) \text {w i t h} \tilde {z} _ {\perp} \in \mathcal {Z} _ {\perp} \\ s. t. \exists z _ {\perp} x = \operatorname {d e c} \left(z _ {\parallel}, z _ {\perp}\right) \}. \tag {5} \\ \end{array}
|
| 129 |
+
$$
|
| 130 |
+
|
| 131 |
+
Adversarial training. Given a model $f_{\theta}$ with enough capacity, minimizing the semantic adversarial risk in Equation (1) results in parameters $\theta^{\star}$
|
| 132 |
+
|
| 133 |
+
$$
|
| 134 |
+
\theta^ {\star} = \underset {\theta} {\operatorname {a r g m i n}} \underset {x = \mathbf {d e c} \left(z _ {\parallel}, z _ {\perp}\right)} {\mathbb {E}} \left[ \max _ {\tilde {z} _ {\perp} \in \mathcal {Z} _ {\perp}} L \left(f _ {\theta} \left(\mathbf {d e c} \left(z _ {\parallel}, \tilde {z} _ {\perp}\right), y\right) \right. \right] \tag {6}
|
| 135 |
+
$$
|
| 136 |
+
|
| 137 |
+
that satisfy Equations (3) and (4). In other words, there exists no transformation $t \in \mathcal{T}$ that, when applied to $x$ , would result in a misclassification of the optimal classifier $f^{\star} = f_{\theta^{\star}}$ . Solving the saddle point problem in Equation (6) requires solving the corresponding inner-maximization problem
|
| 138 |
+
|
| 139 |
+
$$
|
| 140 |
+
\tilde {z} _ {\perp} ^ {\star} = \underset {\tilde {z} _ {\perp} \in \mathcal {Z} _ {\perp}} {\operatorname {a r g m a x}} L \left(f _ {\theta} \left(\operatorname {d e c} \left(z _ {\parallel}, \tilde {z} _ {\perp}\right)\right), y\right). \tag {7}
|
| 141 |
+
$$
|
| 142 |
+
|
| 143 |
+
As enumerating all possible latents $\tilde{z}_{\perp} \in \mathcal{Z}_{\perp}$ is often intractable, we resort to a technique popularized by Madry et al. [19] in the context of adversarial training, which consists of using projected gradient ascent on a differentiable surrogate loss. For a classification task, the $0 - 1$ loss is replaced with the cross-entropy loss:
|
| 144 |
+
|
| 145 |
+
$$
|
| 146 |
+
\hat {L} \left(f _ {\theta} (x), y\right) = - \log \left(\left[ f _ {\theta} (x) \right] _ {y}\right) \tag {8}
|
| 147 |
+
$$
|
| 148 |
+
|
| 149 |
+

|
| 150 |
+
Figure 3. Illustration of the maximization process in Equation (9).
|
| 151 |
+
|
| 152 |
+
where $[a]_i$ returns the i-th coordinate of $a$ . Gradient ascent steps are then interleaved with projection steps for a given number of iterations $K$ . Formally, we find an estimate $\tilde{z}_{\perp}^{(K)}$ of $\tilde{z}_{\perp}^{\star}$ using the following recursion:
|
| 153 |
+
|
| 154 |
+
$$
|
| 155 |
+
\tilde {z} _ {\perp} ^ {(k + 1)} = \operatorname {p r o j} _ {\mathcal {Z} _ {\perp}} \left(\tilde {z} _ {\perp} ^ {(k)} + \alpha \nabla_ {\tilde {z} _ {\perp} ^ {(k)}} \hat {L} \left(f _ {\theta} \left(\operatorname {d e c} \left(z _ {\parallel}, \tilde {z} _ {\perp} ^ {(k)}\right)\right), y\right)\right) \tag {9}
|
| 156 |
+
$$
|
| 157 |
+
|
| 158 |
+
where $\tilde{z}_{\perp}^{(0)}$ is chosen at random within $\mathcal{Z}_{\perp}$ , $\alpha$ is a constant step-size and $\operatorname{proj}_{\mathcal{A}}(a)$ is a projection operator that project $a$ onto $\mathcal{A}$ . Figure 3 illustrates the process.
|
| 159 |
+
|
| 160 |
+
Ultimately, Adversarial Mixing with Disentangled Representations (shortened as AdvMix) tries to find parameters that minimize the worst-case loss that could arise from altering the input examples through plausible transformations. It guarantees that transformations of the input are meaningful by using a disentangled latent representation that encodes independent controllable factors, where some of these factors are known to be independent from the label. Finding such a disentangled representation is rarely possible, as it is not always known which variations of the input should or should not affect the label. In some cases, however, it is possible to train generative models such that we expect some subset of the latents to not affect the label. Section 4 implements AdvMix using a StyleGAN model.
|
| 161 |
+
|
| 162 |
+
Data with low density regions. The motivation behind AdvMix stems from the manifold hypothesis [45]. It states that high dimensional data present in the real-world, such as images, often lies on a low-dimensional manifold. As a consequence, there exists large regions in the input space that are outside the support of the data distribution. Hence, for maximal efficiency, data augmentation and adversarial training should be done carefully to make sure that the augmented data is still within the support of the original data distribution. Data augmentation techniques presented in Figure 2 clearly violate this condition, and despite their success, we cannot expect that they perform well across all datasets (in fact, mixup performs poorly on Color-MNIST). Similarly,
|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
Figure 4. Comparison of mixup and AdvMix on a toy example. In this example, we are given 200 datapoints. Each data point $(x_{1}, x_{2})$ is sampled according to $x_{1} \sim \mathcal{N}(z_{\perp}, \sqrt{3})$ where $z_{\perp} \in \mathcal{Z}_{\perp} = \{0., 10.\}$ and $x_{2} \sim \mathcal{N}(z_{\parallel}, 1)$ where $z_{\parallel} \in \mathcal{Z}_{\parallel} = \{0., 20.\}$ . The colors represent the label. Note that the latent variable $z_{\parallel} = 20y$ is dependent on the label while $z_{\perp}$ is independent of the label. Panel (a) shows the original set of 200 datapoints; panel (b) shows the effect of sampling additional data using AdvMix; and panel (c) shows the effect of mixup. Of course, we should point out that our method, AdvMix, is aware of the underlying latent representation, while mixup is not.
|
| 166 |
+
|
| 167 |
+

|
| 168 |
+
|
| 169 |
+

|
| 170 |
+
|
| 171 |
+
adversarial training targeting $\ell_p$ -norm bounded perturbations tend to trade-off accuracy for robustness [23]. Figure 4 compares mixup and AdvMix on a toy example. In this example, we artificially construct a dataset with two classes and an underlying disentangled latent representation. We observe that by exploiting the knowledge of the disentangled latent representation, AdvMix is capable of generating additional datapoints that are consistent with the original dataset, while mixup generates additional datapoints that are unlikely.
|
| 172 |
+
|
| 173 |
+
Relationship to mixup. mixup augments data with respect to the input space. Given two pairs of inputs $(x_A, y_A)$ , $(x_B, y_B)$ and a linear interpolation factor sampled from a $\beta$ -distribution $\lambda \sim \beta(\alpha, \alpha)$ , mixup generate a new input pair as follows:
|
| 174 |
+
|
| 175 |
+
$$
|
| 176 |
+
\tilde {x} = \lambda x _ {A} + (1 - \lambda) x _ {B}
|
| 177 |
+
$$
|
| 178 |
+
|
| 179 |
+
$$
|
| 180 |
+
\tilde {y} = \lambda y _ {A} + (1 - \lambda) y _ {B}. \tag {10}
|
| 181 |
+
$$
|
| 182 |
+
|
| 183 |
+
Our methodology combines inputs $(x_A, y_A)$ and $(x_B, y_B)$ in the latent space. If $x_A = \mathrm{dec}(z_{A\parallel}, z_{A\perp})$ and $x_B = \mathrm{dec}(z_{B\parallel}, z_{B\perp})$ , we obtain
|
| 184 |
+
|
| 185 |
+
$$
|
| 186 |
+
\tilde {x} = \operatorname {d e c} \left(z _ {A _ {\parallel}}, z _ {B _ {\perp}}\right)
|
| 187 |
+
$$
|
| 188 |
+
|
| 189 |
+
$$
|
| 190 |
+
\tilde {y} = y _ {A}. \tag {11}
|
| 191 |
+
$$
|
| 192 |
+
|
| 193 |
+
Crucially, this combination only affects the latent sub-space that is independent from the label, thus the label remains unchanged. We also note that, unlike [42], no interpolation occurs in the latent space (i.e., $\lambda z_{A\perp} + (1 - \lambda)z_{B\perp}$ ) as this could result in points that are outside $\mathcal{Z}_{\perp}$ when $\mathcal{Z}_{\perp}$ is not convex.
|
| 194 |
+
|
| 195 |
+
Relationship to Invariant Risk Minimization. Arjovsky et al. [25] consider the case where we have multiple datasets
|
| 196 |
+
|
| 197 |
+
$D_{e} = \{x_{i},y_{i}\}_{i = 1}^{n}$ drawn from different training environments $e\in \mathcal{E}$ . As explained in [25], the motivation behind IRM is to minimize the worst-case risk
|
| 198 |
+
|
| 199 |
+
$$
|
| 200 |
+
\max _ {e \in \mathcal {E}} \underset {(x, y) \in D _ {e}} {\mathbb {E}} [ L (f _ {\theta} (x), y) ]. \tag {12}
|
| 201 |
+
$$
|
| 202 |
+
|
| 203 |
+
In this paper, the environments are defined by the different instances of $z_{\perp} \in \mathcal{Z}_{\perp}$ . Given a dataset $\{\mathrm{dec}(z_{i_{\parallel}}, z_{i_{\perp}}), y_i\}_{i=1}^n$ , we can rewrite the semantic adversarial risk shown in Equation (1) as Equation (12) by setting the environment set $\mathcal{E}$ to
|
| 204 |
+
|
| 205 |
+
$$
|
| 206 |
+
\mathcal {E} = \left\{\left\{\operatorname {d e c} \left(z _ {i _ {\parallel}}, z _ {\perp}\right), y _ {i} \right\} _ {i = 1} ^ {n} \mid z _ {\perp} \in \mathcal {Z} _ {\perp} \right\}. \tag {13}
|
| 207 |
+
$$
|
| 208 |
+
|
| 209 |
+
This effectively create an ensemble of datasets for all possible combinations of $z_{\perp} \in \mathcal{Z}_{\perp}$ for all examples.
|
| 210 |
+
|
| 211 |
+
The crucial difference between IRM and AdvMix is in the formulation of the risk. While IRM computes the risk by enumerating over a countable set of environments and picking the worst-case, AdvMix attempts to compute the worst-case risk by finding the combination of variations that maximize the risk over all examples.
|
| 212 |
+
|
| 213 |
+
# 4. Implementation using StyleGAN
|
| 214 |
+
|
| 215 |
+
So far, we have assumed the presence of a generator (or decoder) that is capable of using a perfectly disentangled latent representation: we have assumed that this representation is partitioned into two subsets, one of which is known to be independent from the target label. In practice, the methodology is often reversed: generative models are trained in the hope of obtaining some level of disentanglement. If a partition of the trained latent space does not influence the label, we can use the corresponding trained generator within AdvMix. This section explains why StyleGAN is a good candidate and details how to implement AdvMix using StyleGAN. In particular, as we rely on StyleGAN's mixing property to enforce a partitioning of the latents, only three elements are needed: $(i)$ a transformation set $\mathcal{Z}_{\perp}$ from which label-independent variants $\tilde{z}_{\perp}$ can be chosen; $(ii)$ a dataset $\mathcal{D} = \{z_{i\parallel},y_i\}_{i = 1}^n$ of latents and labels; and $(iii)$ an efficient method to find a worst-case variation $\tilde{z}_{\perp}\in \mathcal{Z}_{\perp}$ .
|
| 216 |
+
|
| 217 |
+
StyleGAN. StyleGAN is a generator architecture for generative adversarial networks proposed by Karras et al. [24]. It borrows interesting properties from the style transfer literature [46]. In this work, we rely on the style mixing property. Formally, the StyleGAN architecture is composed of two stages. The first stage takes a latent variable $z \sim \mathcal{N}(\mathbf{0}, \mathbf{1})$ that is not necessarily disentangled and projects it into a disentangled latent space $z = \mathrm{map}(z)$ . The second stage synthesizes an image $x$ from the disentangled latents $z$ using a decoder $x = \mathrm{dec}(z)$ . Overall, the process of generating an image $x$ using a StyleGAN network is defined as
|
| 218 |
+
|
| 219 |
+
$$
|
| 220 |
+
x = \operatorname {d e c} \circ \operatorname {m a p} (\mathbf {z}) \text {w h e r e} \mathbf {z} \sim \mathcal {N} (\mathbf {0}, \mathbf {1}). \tag {14}
|
| 221 |
+
$$
|
| 222 |
+
|
| 223 |
+
The intermediate latent variables $z$ provide some level of disentanglement that affects image generation at different spatial resolutions which allows us to control the synthesis of an image. Particularly, we can apply the "style" of an image to another by mixing the disentangled latents of these images together. In the context of face generation, the styles corresponding to coarse spatial resolutions affect high-level aspects such as pose, and styles of fine resolutions affect mainly the color scheme. In the rest of this manuscript, we focus on variations of the finer style. Concretely, our experiments in Section 5 assume that the fine attributes $z_{\perp}$ are label-independent, while the coarse attributes $z_{\parallel}$ may be label-dependent. Consequently, the finer style $z_{B\perp}$ of an image $x_{\mathrm{B}}$ can be applied to another image $x_{\mathrm{A}} = \mathrm{dec}(z_{A\parallel},z_{A\perp})$ via $\mathrm{dec}(z_{\mathrm{A}\parallel},z_{\mathrm{B}_{\perp}})$ . Figure 5b shows a nominal image and two variations of that image obtained by mixing the finer style of two other images.
|
| 224 |
+
|
| 225 |
+
Definition of the transformation set. For completeness, we now define the set of transforms $\mathcal{T}$ in Equation (5) by defining $\mathcal{Z}_{\perp}$ . While the formulation of StyleGAN allows $z$ to be sampled within an infinite support, our formulation requires $\mathcal{Z}_{\perp}$ to be bounded. Additionally, as explained by Nalisnick et al. [47], due to concentration of measure, a generative model usually draws samples from its typical set [48] (a subset of the model's full support) rather than regions of high probability density. As such, if $z \in \mathbb{R}^d$ , we wish to define $\mathcal{Z}_{\perp}$ as follows:
|
| 226 |
+
|
| 227 |
+
$$
|
| 228 |
+
\mathcal {Z} _ {\perp} = \left\{\operatorname {m a p} (\mathbf {z}) _ {\perp} \left| \sqrt {d} - \delta d ^ {\frac {1}{4}} \leq \| \mathbf {z} \| _ {2} \leq \sqrt {d} + \delta d ^ {\frac {1}{4}} \right. \right\} \tag {15}
|
| 229 |
+
$$
|
| 230 |
+
|
| 231 |
+
where $\delta$ is a small tunable positive constant. In practice, however, we do not want to backpropagate through the map operation as it is inefficient. Instead, a small collection of latents is sampled, passed through the map operation, and $\mathcal{Z}_{\perp}$ is limited to a neighborhood of the points in this collection. This collection is re-sampled for each example and in expectation covers the typical set well (more details are given in Algorithm 2).
|
| 232 |
+
|
| 233 |
+
**Construction of a dataset of disentangled latents.** Constructing a dataset of labelled latents $\mathcal{D} = \{z_{i\parallel},y_i\}_{i = 1}^n$ requires finding the latents $z_{i}$ that decode into each example $x_{i}$ of an original labelled dataset $\{x_{i},y_{i}\}_{i = 1}^{n}$ . Hence, we need to find a mapping between the image space and the latent space. This mapping, which can be computed offline, is used to construct the dataset $\mathcal{D}$ , and is only required once for each new dataset. Specifically, this mapping is denoted as $\mathrm{enc}: \mathcal{X} \mapsto \mathcal{Z}$ and finds $z_{i}$ such that $x_{i} \approx \mathrm{dec}(z_{i})$ .
|
| 234 |
+
|
| 235 |
+

|
| 236 |
+
(a)
|
| 237 |
+
|
| 238 |
+

|
| 239 |
+
(b)
|
| 240 |
+
Figure 5. Panel a shows how the latents are progressively able to match a target image (on the far right). Panel b shows two different variations of the obtained image.
|
| 241 |
+
|
| 242 |
+
# Algorithm 1 Encoder enc
|
| 243 |
+
|
| 244 |
+
Input: Target image $x$ , trained StyleGAN model dec o map, and trained VGG network vgg. $\alpha_{i}$ and $\beta_{i}$ are hyperparameters all set to 1 and 1/5 respectively. $\gamma^{(k)}$ is a step-size schedule.
|
| 245 |
+
Output: Disentangled latents $\hat{z}$ such that $\mathrm{dec}(\hat{z})\approx x$
|
| 246 |
+
1: $\hat{z} \gets \frac{1}{M} \sum_{i=1}^{M} \mathbf{map}(\mathbf{z}^{(i)})$ with $\mathbf{z}^{(i)} \sim \mathcal{N}(\mathbf{0}, \mathbf{1})$ ▷ Average latents
|
| 247 |
+
2: for $k \in \{1, \dots, N\}$ do
|
| 248 |
+
3: $\hat{x} = \operatorname{dec}(\hat{z})$
|
| 249 |
+
4: $\hat{A} = \mathbf{vgg}(\hat{x})$ $\triangleright \hat{A}$ is a list of activations (after the $2^{\mathrm{nd}}$ convolution of $1^{\mathrm{st}}$ , $2^{\mathrm{nd}}$ and $3^{\mathrm{rd}}$ blocks)
|
| 250 |
+
5: $\mathcal{A} = \mathrm{vgg}(x)$
|
| 251 |
+
6: $\mathcal{A}_{\mathrm{mix}} = \mathrm{vgg}(\mathrm{dec}(\hat{z}_\parallel ,\mathrm{map}(\mathbf{z})_\perp)))$ with $\mathbf{z}\sim \mathcal{N}(\mathbf{0},\mathbf{1})$
|
| 252 |
+
7: $L_{\mathrm{reconstruct}} = \alpha_0\| \hat{x} -x\| _2^2 +\sum_{i = 1}^{|\mathcal{A}|}\alpha_i\| \hat{\mathcal{A}}_i - \mathcal{A}_i\| _2^2$
|
| 253 |
+
8: $L_{\mathrm{mix}} = \sum_{i=1}^{|\mathcal{A}|} \beta_i \| \mathcal{A}_{\mathrm{mix},i} - \mathcal{A}_i\|_2^2$ Reconstruction loss $\triangleright$ Mixing loss
|
| 254 |
+
9: $\hat{z} \gets \hat{z} - \gamma^{(k)}\nabla_{\hat{z}}(L_{\mathrm{reconstruct}} + L_{\mathrm{mix}})$
|
| 255 |
+
10: end for
|
| 256 |
+
|
| 257 |
+
Algorithm 1 defines this mapping through an optimization process. Inspired by [50], and rather than relying solely on the distance between pixel values to define the loss of that optimization, we use the perceptual loss [51, 52] – which helps steer the optimization process. The perceptual loss is defined on the intermediate activations of a trained VGG-16 network [53] (see line 7). We also found that the StyleGAN generator, dec, is a surjective mapping between its disentangled latent space and the image space (i.e., multiple latents can decode into the same image). Hence, since we heavily rely on the mixing property of StyleGAN, and to the contrary of [50], we propose to add an additional component to the loss that steers the latents towards a subset of latents that can be mixed. In particular, we add a perceptual loss between the synthesized image and a mixed version of the same image (see lines 6 and 8). Figure 5 shows the evolution of the optimization process as well as mixed variants of the resulting image.
|
| 258 |
+
|
| 259 |
+
# Generating worst-case examples to train robust models.
|
| 260 |
+
|
| 261 |
+
As explained in Section 3, minimizing the semantic adversarial risk requires solving an inner-maximization problem. We rely on projected gradient ascent on the cross-entropy loss $\hat{L}$ to efficiently find perturbed latents $\tilde{z}_{\perp} \in \mathcal{Z}_{\perp}$ such that, when mixed with $z_{\parallel}$ , make the classifier output a label other than the true label. Algorithm 2 illustrates the process. This algorithm approximates the typical set in Equation (15) by randomly sampling initial latents $\tilde{z}_{\perp}^{(0)}$ $N_{r}$ times and project
|
| 262 |
+
|
| 263 |
+
# Algorithm 2 Solution to Equation (7)
|
| 264 |
+
|
| 265 |
+
Input: A nominal input $x$ and label $y$ , a model $f_{\theta}$ , a StyleGAN model dec o map and an encoder enc. $L$ is the $0 - 1$ loss and $\hat{L}$ is the cross-entropy loss.
|
| 266 |
+
|
| 267 |
+
Output: Possible misclassified example $\tilde{x}$
|
| 268 |
+
|
| 269 |
+
1: $\tilde{x}\gets x$
|
| 270 |
+
2: $[z_{\parallel},z_{\perp}] = \mathsf{enc}(x)$ See Algorithm 1
|
| 271 |
+
3: for $r\in \{1,\dots ,N_{\mathrm{r}}\}$ do Repeat $N_{\mathrm{r}}$ times
|
| 272 |
+
4: $\tilde{z}^{(0)}\gets \mathsf{map}(\mathbf{z})_{\perp}$ with $\mathbf{z}\sim \mathcal{N}(\mathbf{0},\mathbf{1})$ ▷ Initial latents
|
| 273 |
+
5: $\tilde{x}^{(0)} = \mathsf{dec}(z_{\parallel},\tilde{z}_{\perp}^{(0)})$
|
| 274 |
+
6: for $k\in \{1,\ldots ,K\}$ do $\triangleright$ K is the number of optimization steps
|
| 275 |
+
7: $\tilde{z}_{\perp}^{(k)} \gets \mathbf{proj}\left(\tilde{z}_{\perp}^{(k-1)} + \alpha \nabla_{\tilde{z}_{\perp}^{(k-1)}} \hat{L}(f_{\theta}(\tilde{x}^{(0)}), y)\right)$
|
| 276 |
+
8: $\tilde{x}^{(k)} = \mathsf{dec}(z_{\parallel},\tilde{z}_{\perp}^{(k)})$
|
| 277 |
+
9: if $L(f_{\theta}(\tilde{x}^{(k)}),y) > L(f_{\theta}(\tilde{x},y)$ then
|
| 278 |
+
10: $\tilde{x}\gets \tilde{x}^{(k)}$
|
| 279 |
+
11: return $\triangleright$ Since $L$ is the $0 - 1$ loss, the procedure can terminate early
|
| 280 |
+
12: end if
|
| 281 |
+
13: end for
|
| 282 |
+
14: end for
|
| 283 |
+
|
| 284 |
+
ing intermediate solutions $\tilde{z}_{\perp}^{(k)}$ back onto a neighborhood of $\tilde{z}_{\perp}^{(0)}$ .<sup>3</sup> It refines the initial latents using gradient ascent with the goal of finding latents $\tilde{z}_{\perp}^{(K)}$ that, when mixed with the original image latents $z_{\parallel}$ , generate an image $\mathrm{dec}(z_{\parallel}, \tilde{z}_{\perp}^{(K)})$ that is misclassified. Figure 1 shows the result of this optimization procedure where the original image (on the top-left) is classified as "not smiling" and the optimized image (on the bottom-left) is classified as "smiling". Once perturbed latents $\tilde{z}_{\perp} = \tilde{z}_{\perp}^{(K)}$ are found, we can compute the cross-entropy loss on the image generated by $\mathrm{dec}(z_{i\parallel}, \tilde{z}_{\perp})$ . Formally, for a classifier $f_{\theta}$ and a dataset $\mathcal{D} = \{z_{i\parallel}, y_i\}_{i=1}^n$ , we want to solve
|
| 285 |
+
|
| 286 |
+
$$
|
| 287 |
+
\operatorname {a r g m i n} _ {\theta} \mathbb {E} _ {z _ {i _ {\parallel}}, y _ {i} \sim D} \left[ L \left(f _ {\theta} \left(\operatorname {d e c} \left(z _ {i _ {\parallel}}, \tilde {z} _ {\perp}\right)\right), y _ {i}\right) \right] \tag {16}
|
| 288 |
+
$$
|
| 289 |
+
|
| 290 |
+
$$
|
| 291 |
+
\text {a n d} \tilde {z} _ {\perp} = \operatorname {a r g m a x} _ {z _ {\perp} \in \mathcal {Z} _ {\perp}} L \left(f _ {\theta} \left(\operatorname {d e c} \left(z _ {i \|}, z _ {\perp}\right)\right), y _ {i}\right).
|
| 292 |
+
$$
|
| 293 |
+
|
| 294 |
+
Random mixing with disentangled representations. While this section describes an instantiation of AdvMix using StyleGAN, it is possible to formulate an equivalent random data augmentation baseline. For an input $x$ , we generate a
|
| 295 |
+
|
| 296 |
+

|
| 297 |
+
Figure 6. Mean colors given to each digit in the training set of our Color-MNIST case-study.
|
| 298 |
+
|
| 299 |
+
random variation as follows:
|
| 300 |
+
|
| 301 |
+
$$
|
| 302 |
+
\tilde {x} = \operatorname {d e c} (\operatorname {e n c} (x) _ {\parallel}, \operatorname {m a p} (\mathbf {z}) _ {\perp}) \text {w i t h} \mathbf {z} \sim \mathcal {N} (\mathbf {0}, \mathbf {1}) \tag {17}
|
| 303 |
+
$$
|
| 304 |
+
|
| 305 |
+
# 5. Results
|
| 306 |
+
|
| 307 |
+
In this section, we compare AdvMix to (i) nominal training which minimizes the empirical risk, (ii) Adversarial Training (AT) which minimizes the adversarial risk over $\ell_{\infty}$ -norm bounded perturbations of size $\epsilon$ in input space [19], and (iii) Random Mixing with Disentangled Representations (RandMix) which minimizes the vicinal risk by randomly sampling latents from $\mathcal{Z}_{\perp}$ (rather than systematically finding the worst-case variations). We perform two experiments to assess the generalization abilities of AdvMix. The first experiment is done on an artificially constructed dataset called Color-MNIST (it bares resemblance to the Color-MNIST experiments present in [25]). The second experiment uses CELEBA. Both experiment demonstrate that methods using semantic variations as expressed by a trained StyleGAN model achieve higher accuracy. It also demonstrates that, when the distribution of variations is skewed (i.e., some variations $z_{\perp}$ appear more often than others in the dataset used to train the StyleGAN model), AdvMix obtains higher accuracy than RandMix. For both experiments, we train a truncated VGG network with 5 layers using 5 epochs on Color-MNIST and 20 epochs on CELEBA. We use the Adam [54] optimizer with a learning rate of $10^{-3}$ . AdvMix is trained with $N_{\mathrm{r}}$ set to 5.
|
| 308 |
+
|
| 309 |
+
# 5.1. Color-MNIST
|
| 310 |
+
|
| 311 |
+
Color-MNIST consists of a dataset of MNIST [55] digits that are artificially colored to emphasize bias. On the training set, we color each pair $(x,y)$ of the original MNIST dataset with a color drawn randomly from a normal distribution with mean $\mu_y$ and standard deviation $\sigma$ (means $\mu_y$ for $y\in \{0,\dots ,9\}$ are shown in Figure 6). On the test set, we color digits uniformly at random. In other words, the colors present in the training set spuriously correlate with the label. We can use $\sigma$ to affect this correlation: by progressively increasing $\sigma$ the dataset becomes less biased. For all techniques (including mixup), we vary the level of bias and train models using 5 epochs. The StyleGAN model is trained on the training set only, once for each setting of $\sigma$ . The disentangled latents defining the finer style correspond to the final resolution of $32\times 32$ .
|
| 312 |
+
|
| 313 |
+

|
| 314 |
+
Figure 7. Accuracy of different training methods on images from our unbiased Color-MNIST test set. The training set is progressively debiased by increasing the standard deviation of the colors present.
|
| 315 |
+
|
| 316 |
+
Table 1. Effect of bias when training a StyleGAN model on our Color-MNIST dataset.
|
| 317 |
+
|
| 318 |
+
<table><tr><td rowspan="2">Method</td><td colspan="3">Test accuracy on clean images</td></tr><tr><td>Unbiased</td><td>Less biased</td><td>More biased</td></tr><tr><td>RandMix</td><td>99.11%</td><td>98.87%</td><td>97.63%</td></tr><tr><td>AdvMix</td><td>99.19%</td><td>99.07%</td><td>98.79%</td></tr></table>
|
| 319 |
+
|
| 320 |
+
Figure 7 shows the results. Across all settings, RandMix and AdvMix outperform the other methods. As expected, the gap between all methods decreases as the training set becomes less biased. It is also worth noting that AT is useful (compared to nominal training and mixup) as on this dataset $\ell_{\infty}$ -norm bounded perturbations allow the exploration of slight variations in colors. RandMix and AdvMix are both expected to do well as all variations $z_{\perp}$ (that correspond to applications of different colors) are equally likely to be drawn from the StyleGAN model (since they are uniformly distributed in the training set).
|
| 321 |
+
|
| 322 |
+
To further emphasize the difference between RandMix and AdvMix, we purposefully bias the training of the StyleGAN model. We create two additional datasets (with $\sigma = 0$ ). With the first dataset (named "more biased"), the StyleGAN model is trained on a large fraction of zeros (and few other digits), while on the second dataset (named "less biased", the StyleGAN model is trained on a large fraction of zeros and ones. As a result, rarely occurring variations (colors of digits from 1 to 9 for the first dataset and colors of digits from 2 to 9 for the second) are less likely to be randomly selected by RandMix. Table 1 shows the results. We observe that AdvMix performs better. However, we note that the gap is not large, as all color variations all contain red, green and blue components (which allows the network to implicitly learn about other color combinations).
|
| 323 |
+
|
| 324 |
+
Finally, to create a stronger effect, we limit digits to the
|
| 325 |
+
|
| 326 |
+
Table 2. Effect of bias when training a StyleGAN model on our RGB Color-MNIST dataset (limited to red, blue or green colors). The classifier is a linear model (instead of a convolutional network).
|
| 327 |
+
|
| 328 |
+
<table><tr><td rowspan="3">Method</td><td colspan="3">Test accuracy on clean images</td></tr><tr><td>Unbiased</td><td>99% red</td><td>99.9% red</td></tr><tr><td></td><td>Less biased</td><td>More biased</td></tr><tr><td>RandMix</td><td>88.55%</td><td>83.18%</td><td>53.56%</td></tr><tr><td>AdvMix</td><td>85.07%</td><td>85.02%</td><td>85.00%</td></tr></table>
|
| 329 |
+
|
| 330 |
+
red, green and blue colors only (resulting in new datasets), and use a linear classifier (instead of a truncated VGG network). Table 2 demonstrates that, when the StyleGAN model is trained with a significant proportion of red digits, AdvMix does much better. Indeed, AdvMix is able to systematically find the corner cases (i.e., green and blue variations) that are currently misclassified rather than relying on the random sampling of such cases. We note that adversarial training can result in unstable learning, which can explain why RandMix does slightly better when the StyleGAN model is unbiased.
|
| 331 |
+
|
| 332 |
+
# 5.2. CELEBA
|
| 333 |
+
|
| 334 |
+
CELEBA [56] is a large-scale public dataset with forty different face attribute annotations including whether a person smiles or wears a hat. We make no modifications to the dataset and use a pretrained StyleGAN model. For all techniques, we train models using 20 epochs. We evaluate all methods on their ability to classify the "smiling" attribute, as well as three other attributes. In this experiment, the disentangled latents defining the finer style correspond to resolutions ranging from $128 \times 128$ to $1024 \times 1024$ .
|
| 335 |
+
|
| 336 |
+
In Table 3, we observe that AdvMix is the only method that systematically achieves high accuracy. This clearly demonstrates AdvMix can lead to a lower generalization error. It is also interesting to see that RandMix does not always improve on nominal training and that AT consistently trades off clean accuracy for $\ell_{\infty}$ -robustness (as seen in [23]). Finally, Figure 8 shows qualitative examples of images that are all correctly classified by the nominal model, but for which we can find plausible variants that are misclassified. Appendix B shows more results and includes other data augmentation schemes.
|
| 337 |
+
|
| 338 |
+
Overall, these results are confirming the observations made on the Color-MNIST dataset. They seem to indicate that there is a slightly distributional shift between CELEBA's train and test sets (at least when it comes to the finer image style). By systematically probing variations that are difficult to classify, AdvMix is able to overcome this shift and reach
|
| 339 |
+
|
| 340 |
+
Table 3. Test accuracy on different classification tasks of the CELEBA dataset.
|
| 341 |
+
|
| 342 |
+
<table><tr><td rowspan="2">Method</td><td colspan="4">Test accuracy on attribute</td></tr><tr><td>#1</td><td>#2 (smiling)</td><td>#3</td><td>#4</td></tr><tr><td>Nominal</td><td>96.49%</td><td>90.22%</td><td>83.52%</td><td>78.05%</td></tr><tr><td>AT (ε = 4/255)</td><td>95.34%</td><td>91.11%</td><td>81.43%</td><td>76.61%</td></tr><tr><td>AT (ε = 8/255)</td><td>95.22%</td><td>89.29%</td><td>79.46%</td><td>74.39%</td></tr><tr><td>RandMix</td><td>96.70%</td><td>90.36%</td><td>84.49%</td><td>76.41%</td></tr><tr><td>AdvMix</td><td>97.56%</td><td>92.29%</td><td>85.65%</td><td>79.47%</td></tr></table>
|
| 343 |
+
|
| 344 |
+

|
| 345 |
+
Cean
|
| 346 |
+
|
| 347 |
+

|
| 348 |
+
|
| 349 |
+

|
| 350 |
+
|
| 351 |
+

|
| 352 |
+
|
| 353 |
+

|
| 354 |
+
|
| 355 |
+

|
| 356 |
+
Perturbed
|
| 357 |
+
|
| 358 |
+

|
| 359 |
+
Figure 8. The top row shows examples of clean images from CELEBA that are all classified correctly by the nominal model. The bottom row shows semantically plausible variants of these images that are all misclassified.
|
| 360 |
+
|
| 361 |
+

|
| 362 |
+
|
| 363 |
+

|
| 364 |
+
|
| 365 |
+

|
| 366 |
+
|
| 367 |
+
better classification accuracy (to the contrary of RandMix which can only stumble on difficult variants by chance).
|
| 368 |
+
|
| 369 |
+
# 6. Conclusion
|
| 370 |
+
|
| 371 |
+
We have demonstrated a novel approach to achieving robustness to input variations encountered in the real world by generating adversarial instances that compose disentangled representations. We have shown how this framework can be realized by leveraging the StyleGAN architecture – resulting in models that are not only robust to systematic evaluation of insensitivity to variations but also exhibit better generalization, demonstrating that that accuracy is not necessarily at odds with robustness. Our formulation relies on good generative models that can learn a disentangled representation from which some directions are orthogonal to the label we are trying to predict. Methods such as AdvMix are intended to be used to reduce the effect of bias and spurious correlations on classifiers.<sup>8</sup> We hope the promising results shown in this paper encourage the development of more effective disentangled representations that cover most factors of variations encountered in the real world. Finally, we hope this work leads to the exploration of this paradigm in the context of other Computer Vision applications and leads to the development of robust perception systems that can be safely used in the real world.
|
| 372 |
+
|
| 373 |
+
# References
|
| 374 |
+
|
| 375 |
+
[1] V. Vapnik, "Statistical learning theory," 1998. 1
|
| 376 |
+
[2] I. Goodfellow, Y. Bengio, and A. Courville, Deep Learning. MIT Press, 2016. [Online]. Available: http://www.deeplearningbook.org 1
|
| 377 |
+
[3] A. Krizhevsky, I. Sutskever, and G. E. Hinton, "Imagenet classification with deep convolutional neural networks," in Advances in neural information processing systems, 2012, pp. 1097-1105.
|
| 378 |
+
[4] G. Hinton, L. Deng, D. Yu, G. E. Dahl, A.-r. Mohamed, N. Jaitly, A. Senior, V. Vanhoucke, P. Nguyen, T. N. Sainath, and others, "Deep neural networks for acoustic modeling in speech recognition: The shared views of four research groups," IEEE Signal processing magazine, vol. 29, no. 6, pp. 82-97, 2012. 1
|
| 379 |
+
[5] K. D. Julian, J. Lopez, J. S. Brush, M. P. Owen, and M. J. Kochenderfer, "Policy compression for aircraft collision avoidance systems," in IEEE/AIAA 35th Digital Avionics Systems Conference (DASC). IEEE, 2016, pp. 1-10. 1
|
| 380 |
+
[6] A. Torralba, A. A. Efros et al., "Unbiased look at dataset bias." in CVPR, vol. 1, no. 2. Citeseer, 2011, p. 7. 1
|
| 381 |
+
[7] A. Kuehlkamp, B. Becker, and K. Bowyer, "Gender-from-iris or gender-from-mascara?" in 2017 IEEE Winter Conference on Applications of Computer Vision (WACV). IEEE, 2017, pp. 1151-1159. 1
|
| 382 |
+
[8] B. Recht, R. Roelofs, L. Schmidt, and V. Shankar, “Do imagenet classifiers generalize to imagenet?” arXiv preprint arXiv:1902.10811, 2019. 1
|
| 383 |
+
[9] D. Hendrycks, K. Zhao, S. Basart, J. Steinhardt, and D. Song, “Natural adversarial examples,” arXiv preprint arXiv:1907.07174, 2019. 1
|
| 384 |
+
[10] I. Vasiljevic, A. Chakrabarti, and G. Shakhnarovich, "Examining the impact of blur on recognition by convolutional networks," arXiv preprint arXiv:1611.05760, 2016. 1
|
| 385 |
+
[11] R. Geirhos, C. R. Temme, J. Rauber, H. H. Schütt, M. Bethge, and F. A. Wichmann, “Generalisation in humans and deep neural networks,” in Advances in Neural Information Processing Systems, 2018, pp. 7538–7550. 1
|
| 386 |
+
[12] H. Zhang, M. Cisse, Y. N. Dauphin, and D. Lopez-Paz, "mixup: Beyond empirical risk minimization," arXiv preprint arXiv:1710.09412, 2017. 1, 3
|
| 387 |
+
[13] E. D. Cubuk, B. Zoph, D. Mane, V. Vasudevan, and Q. V. Le, "Autoaugment: Learning augmentation policies from data," arXiv preprint arXiv:1805.09501, 2018. 1
|
| 388 |
+
[14] N. Carlini and D. Wagner, "Adversarial examples are not easily detected: Bypassing ten detection methods," in Proceedings of the 10th ACM Workshop on Artificial Intelligence and Security. ACM, 2017, pp. 3-14. 2
|
| 389 |
+
[15] ——, “Towards evaluating the robustness of neural networks,” in 2017 IEEE Symposium on Security and Privacy. IEEE, 2017, pp. 39-57.
|
| 390 |
+
[16] I. J. Goodfellow, J. Shlens, and C. Szegedy, “Explaining and harnessing adversarial examples,” arXiv preprint arXiv:1412.6572, 2014. 2
|
| 391 |
+
[17] A. Kurakin, I. Goodfellow, and S. Bengio, “Adversarial machine learning at scale,” arXiv preprint arXiv:1611.01236, 2016.
|
| 392 |
+
|
| 393 |
+
[18] C. Szegedy, W. Zaremba, I. Sutskever, J. Bruna, D. Erhan, I. Goodfellow, and R. Fergus, "Intriguing properties of neural networks," arXiv preprint arXiv:1312.6199, 2013. 2
|
| 394 |
+
[19] A. Madry, A. Makelov, L. Schmidt, D. Tsipras, and A. Vladu, "Towards deep learning models resistant to adversarial attacks," arXiv preprint arXiv:1706.06083, 2017. 2, 3, 7
|
| 395 |
+
[20] N. Papernot, P. McDaniel, X. Wu, S. Jha, and A. Swami, "Distillation as a defense to adversarial perturbations against deep neural networks," arXiv preprint arXiv:1511.04508, 2015. 2
|
| 396 |
+
[21] H. Kannan, A. Kurakin, and I. Goodfellow, "Adversarial Logit Pairing," arXiv preprint arXiv:1803.06373, 2018.
|
| 397 |
+
[22] C. Xie, Y. Wu, L. van der Maaten, A. Yuille, and K. He, “Feature denoising for improving adversarial robustness,” arXiv preprint arXiv:1812.03411, 2018. 2
|
| 398 |
+
[23] A. Ilyas, S. Santurkar, D. Tsipras, L. Engstrom, B. Tran, and A. Madry, "Adversarial examples are not bugs, they are features," arXiv preprint arXiv:1905.02175, 2019. 2, 4, 8
|
| 399 |
+
[24] T. Karras, S. Laine, and T. Aila, “A style-based generator architecture for generative adversarial networks,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2019, pp. 4401–4410. 2, 5
|
| 400 |
+
[25] M. Arjovsky, L. Bottou, I. Gulrajani, and D. Lopez-Paz, "Invariant risk minimization," arXiv preprint arXiv:1907.02893, 2019. 2, 3, 4, 5, 7
|
| 401 |
+
[26] D. Tsipras, S. Santurkar, L. Engstrom, A. Turner, and A. Madry, "Robustness may be at odds with accuracy," arXiv preprint arXiv:1805.12152, 2018. 2
|
| 402 |
+
[27] A. Kurakin, I. Goodfellow, and S. Bengio, “Adversarial examples in the physical world,” arXiv preprint arXiv:1607.02533, 2016. 2
|
| 403 |
+
[28] S.-M. Moosavi-Dezfooli, A. Fawzi, J. Uesato, and P. Frossard, "Robustness via curvature regularization, and vice versa," arXiv preprint arXiv:1811.09716, 2018. 2
|
| 404 |
+
[29] H. Zhang, Y. Yu, J. Jiao, E. P. Xing, L. E. Ghaoui, and M. I. Jordan, "Theoretically principled trade-off between robustness and accuracy," arXiv preprint arXiv:1901.08573, 2019. 2
|
| 405 |
+
[30] C. Qin, J. Martens, S. Gowal, D. Krishnan, A. Fawzi, S. De, R. Stanforth, P. Kohli et al., "Adversarial robustness through local linearization," arXiv preprint arXiv:1907.02610, 2019. 2
|
| 406 |
+
[31] L. Engstrom, B. Tran, D. Tsipras, L. Schmidt, and A. Madry, "A rotation and a translation suffice: Fooling cnns with simple transformations," arXiv preprint arXiv:1712.02779, 2017. 2
|
| 407 |
+
[32] C. Kanbak, S.-M. Moosavi-Dezfooli, and P. Frossard, "Geometric robustness of deep networks: analysis and improvement," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 4441-4449. 2
|
| 408 |
+
[33] S. Baluja and I. Fischer, “Adversarial transformation networks: Learning to generate adversarial examples,” arXiv preprint arXiv:1703.09387, 2017. 2
|
| 409 |
+
[34] Y. Song, R. Shu, N. Kushman, and S. Ermon, "Constructing unrestricted adversarial examples with generative models," in Advances in Neural Information Processing Systems, 2018, pp. 8312-8323. 2
|
| 410 |
+
[35] C. Xiao, B. Li, J.-Y. Zhu, W. He, M. Liu, and D. Song, "Generating adversarial examples with adversarial networks," arXiv preprint arXiv:1801.02610, 2018. 2
|
| 411 |
+
|
| 412 |
+
[36] A. Odena, C. Olah, and J. Shlens, "Conditional image synthesis with auxiliary classifier gans," in Proceedings of the 34th International Conference on Machine Learning-Volume 70. JMLR.org, 2017, pp. 2642-2651. 2
|
| 413 |
+
[37] H. Qiu, C. Xiao, L. Yang, X. Yan, H. Lee, and B. Li, "Semanticadv: Generating adversarial examples via attribute-conditional image editing," arXiv preprint arXiv:1906.07927, 2019. 2
|
| 414 |
+
[38] A. Jalal, A. Ilyas, C. Daskalakis, and A. G. Dimakis, “The robust manifold defense: Adversarial training using generative models,” arXiv preprint arXiv:1712.09196, 2017. 2
|
| 415 |
+
[39] K. He, X. Zhang, S. Ren, and J. Sun, “Deep residual learning for image recognition,” in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770–778. 2
|
| 416 |
+
[40] T. DeVries and G. W. Taylor, "Improved regularization of convolutional neural networks with cutout," arXiv preprint arXiv:1708.04552, 2017. 2
|
| 417 |
+
[41] S. Yun, D. Han, S. J. Oh, S. Chun, J. Choe, and Y. Yoo, "Cutmix: Regularization strategy to train strong classifiers with localizable features," arXiv preprint arXiv:1905.04899, 2019. 2
|
| 418 |
+
[42] V. Verma, A. Lamb, C. Beckham, A. Najafi, I. Mitliagkas, A. Courville, D. Lopez-Paz, and Y. Bengio, "Manifold mixup: Better representations by interpolating hidden states," arXiv preprint arXiv:1806.05236, 2018. 3, 4
|
| 419 |
+
[43] C. Heinze-Deml and N. Meinshausen, “Conditional variance penalties and domain shift robustness,” arXiv preprint arXiv:1710.11469, 2017. 3
|
| 420 |
+
[44] I. Higgins, D. Amos, D. Pfau, S. Racaniere, L. Matthey, D. Rezende, and A. Lerchner, “Towards a Definition of Disentangled Representations,” arXiv e-prints, p. arXiv:1812.02230, Dec 2018. 3
|
| 421 |
+
[45] C. Fefferman, S. Mitter, and H. Narayanan, “Testing the manifold hypothesis,” Journal of the American Mathematical Society, vol. 29, no. 4, pp. 983–1049, 2016. 4
|
| 422 |
+
[46] X. Huang and S. Belongie, “Arbitrary style transfer in real-time with adaptive instance normalization,” in Proceedings of the IEEE International Conference on Computer Vision, 2017, pp. 1501–1510. 5
|
| 423 |
+
[47] E. Nalisnick, A. Matsukawa, Y. W. Teh, and B. Lakshminarayanan, "Detecting out-of-distribution inputs to deep generative models using a test for typicality," arXiv preprint arXiv:1906.02994, 2019. 5
|
| 424 |
+
[48] T. M. Cover and J. A. Thomas, Elements of information theory. John Wiley & Sons, 2012. 5
|
| 425 |
+
[49] R. Vershynin, High-dimensional probability: An introduction with applications in data science. Cambridge University Press, 2018, vol. 47. 5
|
| 426 |
+
[50] R. Abdal, Y. Qin, and P. Wonka, “Image2stylegan: How to embed images into the stylegan latent space?” arXiv preprint arXiv:1904.03189, 2019. 6
|
| 427 |
+
[51] J. Johnson, A. Alahi, and L. Fei-Fei, “Perceptual losses for real-time style transfer and super-resolution,” in European conference on computer vision. Springer, 2016, pp. 694–711. 6
|
| 428 |
+
[52] A. Dosovitskiy and T. Brox, "Generating images with perceptual similarity metrics based on deep networks," in Advances
|
| 429 |
+
|
| 430 |
+
in neural information processing systems, 2016, pp. 658-666. 6
|
| 431 |
+
[53] K. Simonyan and A. Zisserman, “Very deep convolutional networks for large-scale image recognition,” arXiv preprint arXiv:1409.1556, 2014. 6
|
| 432 |
+
[54] D. P. Kingma and J. Ba, "Adam: A method for stochastic optimization," arXiv preprint arXiv:1412.6980, 2014. 7
|
| 433 |
+
[55] Y. LeCun and C. Cortes, “MNIST handwritten digit database,” 2010. [Online]. Available: http://yann.lecun.com/exdb/mnist/7
|
| 434 |
+
[56] Z. Liu, P. Luo, X. Wang, and X. Tang, “Deep learning face attributes in the wild,” in Proceedings of the IEEE international conference on computer vision, 2015, pp. 3730–3738. 8
|
| 435 |
+
[57] M. Abadi, P. Barham, J. Chen, Z. Chen, A. Davis, J. Dean, M. Devin, S. Ghemawat, G. Irving, M. Isard, and others, "Tensorflow: a system for large-scale machine learning," in OSDI, vol. 16, 2016, pp. 265-283. 15
|
achievingrobustnessinthewildviaadversarialmixingwithdisentangledrepresentations/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:23db475651f81162ff3546a0ea0eda3e815a2d5bce1473dac7e57ed9028c2c62
|
| 3 |
+
size 342934
|
achievingrobustnessinthewildviaadversarialmixingwithdisentangledrepresentations/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:232d1aad80fc53cdfb82441b39f8ca835236dedbd6500d91490bb546e86e7bb5
|
| 3 |
+
size 605609
|
acneattentivecontextnormalizationforrobustpermutationequivariantlearning/a7dc8fab-d5ca-4973-8a26-4820902b74f7_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:df4413be24e56b7c6bdcae83200228f4f7e9cd911d136ac70eae77e25643ad5d
|
| 3 |
+
size 86171
|
acneattentivecontextnormalizationforrobustpermutationequivariantlearning/a7dc8fab-d5ca-4973-8a26-4820902b74f7_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1e0c9aad5d2681fea0090c7c852ad4d191be565cf762d2cb651d7af2ae9278ec
|
| 3 |
+
size 106446
|
acneattentivecontextnormalizationforrobustpermutationequivariantlearning/a7dc8fab-d5ca-4973-8a26-4820902b74f7_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a7cf9dfb9bf9cd016bcdd0c1aa6d747611463548f885c3eecdeddda34b47ca8d
|
| 3 |
+
size 640786
|
acneattentivecontextnormalizationforrobustpermutationequivariantlearning/full.md
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ACNe: Attentive Context Normalization for Robust Permutation-Equivariant Learning
|
| 2 |
+
|
| 3 |
+
Weiwei Sun<sup>1</sup> Wei Jiang<sup>1</sup> Eduard Trulls<sup>2</sup> Andrea Tagliasacchi<sup>3</sup> Kwang Moo Yi<sup>1</sup>
|
| 4 |
+
<sup>1</sup>University of Victoria <sup>2</sup>Google Research, Zurich <sup>3</sup>Google Research, Toronto
|
| 5 |
+
{weiweisun, jiangwei, kyi}@uvic.ca {trulls, taglia}@google.com
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Many problems in computer vision require dealing with sparse, unordered data in the form of point clouds. Permutation-equivariant networks have become a popular solution - they operate on individual data points with simple perceptrons and extract contextual information with global pooling. This can be achieved with a simple normalization of the feature maps, a global operation that is unaffected by the order. In this paper, we propose Attentive Context Normalization (ACN), a simple yet effective technique to build permutation-equivariant networks robust to outliers. Specifically, we show how to normalize the feature maps with weights that are estimated within the network, excluding outliers from this normalization. We use this mechanism to leverage two types of attention: local and global - by combining them, our method is able to find the essential data points in high-dimensional space to solve a given task. We demonstrate through extensive experiments that our approach, which we call Attentive Context Networks (ACNe), provides a significant leap in performance compared to the state-of-the-art on camera pose estimation, robust fitting, and point cloud classification under noise and outliers. Source code: https://github.com/vcg-uvic/acne.
|
| 10 |
+
|
| 11 |
+
# 1. Introduction
|
| 12 |
+
|
| 13 |
+
Several problems in computer vision require processing sparse, unordered collections of vectors $\mathcal{P} = \{\mathbf{p}_n\in \mathbb{R}^D\}$ , commonly called clouds. Examples include pixel locations $(D = 2)$ , point clouds from depth sensors $(D = 3)$ , and sparse correspondences across a pair of images $(D = 4)$ . The latter includes wide-baseline stereo, one of the fundamental problems in computer vision. It lies at the core of Structure-from-Motion (SfM), which, in turn, is the building block of applications such as 3D reconstruction [1], image-based rendering [43] and time-lapse smoothing [28].
|
| 14 |
+
|
| 15 |
+
Wide-baseline stereo has been traditionally solved by extracting small collections of discrete keypoints [31] and
|
| 16 |
+
|
| 17 |
+
finding correspondences among them with robust estimators [16], a reliable approach used for well over two decades. This has changed over the past few years, with the arrival of deep learning and an abundance of new dense [57, 47, 60] and sparse [55, 12, 39, 59, 27] methods. Here, we focus on sparse methods, which have seen many recent developments made possible by the introduction of PointNets [36, 37] – neural networks that rely on multi-layer perceptrons and global pooling to process unordered data in a permutation-equivariant manner – something which is feasible with neither convolutional nor fully-connected layers.
|
| 18 |
+
|
| 19 |
+
Networks of this type - hereafter referred to as permutation-equivariant networks - have pioneered the application of deep learning to point clouds. The original PointNet relied on the concatenation of point-wise (context-agnostic) and global (point-agnostic) features to achieve permutation equivariance. Yi et al. [55] proposed Context Normalization (CN) as a simple, yet effective alternative to global feature pooling: all it requires is a non-parametric normalization of the feature maps to zero mean and unit variance. Contrary to other normalization techniques utilized by neural networks [22, 2, 46, 51], whose primary objective is to improve convergence, context normalization is used to generate contextual information while preserving permutation equivariance. Despite its simplicity, it proved more effective than the PointNet approach on wide-baseline stereo, contributing to a relative increase in pose estimation accuracy of $50 - 100\%$ ; see [55, Fig. 5].
|
| 20 |
+
|
| 21 |
+
Note that CN normalizes the feature maps according to first- (mean) and second- (variance) order moments. Interestingly, these two quantities can be expressed as the solution of a least-squares problem:
|
| 22 |
+
|
| 23 |
+
$$
|
| 24 |
+
\hat{\boldsymbol{\mu}} = \underset{\boldsymbol{\mu}}{\operatorname{argmin}} \sum_{n} \left\| \mathbf{p}_{n} - \boldsymbol{\mu} \right\|_{2}^{2} \tag{1}
|
| 25 |
+
$$
|
| 26 |
+
|
| 27 |
+
$$
|
| 28 |
+
\hat{\boldsymbol{\sigma}} = \underset{\boldsymbol{\sigma}}{\operatorname{argmin}} \sum_{n} \left\| \left(\mathbf{p}_{n} - \hat{\boldsymbol{\mu}}\right)^{\circ 2} - \boldsymbol{\sigma}^{\circ 2} \right\|_{2}^{2} \tag{2}
|
| 29 |
+
$$
|
| 30 |
+
|
| 31 |
+
However, it is well known that least-squares optimization is not robust to outliers [6, Sec. 3], a problem that also afflicts CN. We illustrate this limitation in Fig. 1, where
|
| 32 |
+
|
| 33 |
+

|
| 34 |
+
|
| 35 |
+

|
| 36 |
+
|
| 37 |
+

|
| 38 |
+
|
| 39 |
+

|
| 40 |
+
|
| 41 |
+

|
| 42 |
+
Figure 1. Robust neural line fitting - We learn to fit lines with outliers (80%) via our ACNe, as well as CNe [55]. We visualize the ground truth and the network estimates. We color-code the weights learned by the k-th residual layer of ACNe and used to normalize the feature maps - notice that our method, which mimics Iterative Re-weighted Least Squares (IRLS), learns to progressively focus its attention on the inliers. This allows ACNe to find the correct solution where CNe fails.
|
| 43 |
+
|
| 44 |
+
the toy task is to fit a line to data corrupted by outliers. Note that this is a critical weakness, as wide-baseline stereo, the application CN was originally devised for, is a problem plagued by outliers: outlier ratios above $80\%$ are typical in standard public datasets; see Section 4.3.
|
| 45 |
+
|
| 46 |
+
To address this issue, we take inspiration from a classical technique used in robust optimization: Iteratively Reweighted Least Squares (IRLS) [8]. As an example, let us consider the computation of the first-order moment (1). Rather than using the square of the residuals, we can optimize with respect to a robust kernel $\kappa$ that allows outliers to be ignored:
|
| 47 |
+
|
| 48 |
+
$$
|
| 49 |
+
\underset{\boldsymbol{\mu}}{\operatorname{argmin}} \sum_{n} \kappa\left(\left\| \mathbf{p}_{n} - \boldsymbol{\mu} \right\|_{2}\right), \tag{3}
|
| 50 |
+
$$
|
| 51 |
+
|
| 52 |
+
which can then be converted back into an iterative least-squares optimization ( $t$ indexes iterations):
|
| 53 |
+
|
| 54 |
+
$$
|
| 55 |
+
\underset{\boldsymbol{\mu}^{t}}{\operatorname{argmin}} \sum_{n} \underbrace{\psi\left(\left\| \mathbf{p}_{n} - \boldsymbol{\mu}^{t-1} \right\|_{2}\right)^{-1}}_{\text{attention } w_{n}^{t}} \left\| \mathbf{p}_{n} - \boldsymbol{\mu}^{t} \right\|_{2}^{2}, \tag{4}
|
| 56 |
+
$$
|
| 57 |
+
|
| 58 |
+
where $\psi(\cdot)$ is the penalty function associated with the kernel $\kappa(\cdot)$ ; see [33, 17]. Inspired by this, we design a network that learns to progressively focus its attention on the inliers, operating analogously to $\psi(\cdot)$ over the IRLS iterations.
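
To make this connection concrete, below is a minimal NumPy sketch of the iteration in (4) for a robust first-order moment. The Welsch kernel, the bandwidth `c`, and the toy data are illustrative assumptions, not choices prescribed by the paper.

```python
import numpy as np

def irls_mean(P, n_iters=20, c=0.2):
    """Robust mean of points P (N x D) via IRLS, cf. Eq. (4).

    Uses the Welsch kernel, whose IRLS weight is exp(-(r/c)^2);
    any robust kernel with a redescending influence function
    would play the same role.
    """
    mu = P.mean(axis=0)                      # least-squares initialization
    for _ in range(n_iters):
        r = np.linalg.norm(P - mu, axis=1)   # residuals ||p_n - mu^{t-1}||_2
        w = np.exp(-(r / c) ** 2)            # attention-like weights w_n^t
        w /= w.sum()                         # normalize, mirroring Eq. (6)
        mu = (w[:, None] * P).sum(axis=0)    # weighted least-squares update
    return mu

# Toy usage: 60 inliers near (0.5, 0.5), 40 uniform outliers.
rng = np.random.default_rng(0)
P = np.concatenate([rng.normal(0.5, 0.02, (60, 2)),
                    rng.uniform(-1, 1, (40, 2))])
print(irls_mean(P))   # close to the inlier mean, unlike P.mean(axis=0)
```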
|
| 59 |
+
|
| 60 |
+
Specifically, we propose to train a perceptron that translates the (intermediate) feature maps into their corresponding attention weights, and normalizes them accordingly. We denote this approach as Attentive Context Normalization (ACN), and the networks that rely on this mechanism Attentive Context Networks (ACNe). We consider two types of attention, one that operates on each data point individually (local), and one that estimates the relative importance of data points (global), and demonstrate that using them together yields the best performance. We also evaluate the effect of supervising this attention mechanism when possible. We verify the effectiveness of our method on (1) robust line fitting, (2) classification of 2D and 3D point clouds, and (3) wide-baseline stereo on real-world datasets (outdoors and indoors), showing significant improvements over the state of the art. Our work is, to the best of our knowledge, the first to
|
| 61 |
+
|
| 62 |
+
apply attentive mechanisms to the normalization of feature maps. One can also apply a more common form of attention by operating directly on feature maps [49, 15], but we demonstrate that this does not perform as effectively.
|
| 63 |
+
|
| 64 |
+
# 2. Related work
|
| 65 |
+
|
| 66 |
+
We discuss recent works on deep networks operating on point clouds, review various normalization methods for deep networks, and briefly discuss attention mechanisms.
|
| 67 |
+
|
| 68 |
+
Deep networks for point clouds. Several methods have been proposed to process point cloud data with neural networks. These include graph convolutional networks [13, 26], VoxelNets [61], tangent convolutions [44], and many others. A simpler strategy was introduced by PointNets [36, 37], which has since become a popular solution due to its simplicity. At their core, they are convolutional neural networks with $1 \times 1$ kernels and global pooling operations. Enhancements to the PointNet architecture include incorporating locality information with kernel correlation [42], and contextual information with LSTMs [30]. Another relevant work is Deep Sets [56], which derives neural network parameterizations that guarantee permutation-equivariance.
|
| 69 |
+
|
| 70 |
+
Permutation-equivariant networks for stereo. While PointNets were originally introduced for segmentation and classification of 3D point clouds, Yi et al. [55] demonstrated that they can also be highly effective for robust matching in stereo, showing a drastic leap in performance against hand-crafted methods [16, 45, 5]. The core ingredient of Yi et al. [55] is Context Normalization (CN), an alternative to global feature pooling from PointNets. While similar to other normalization techniques for deep networks [22, 2, 46, 51], CN has a different role – to aggregate point-wise feature maps and generate contextual information. Follow-ups to CN include the use of architectures similar to Yi et al. [55] to iteratively estimate fundamental matrices [39], novel loss formulations [12], and the modeling of locality [59]. In OANet [58], order-aware filtering was utilized to incorporate context and spatial correlation. While all of these works rely on “vanilla” CN, we show how to improve its performance by embedding an attention mechanism therein. Our improvements are compatible with any of these techniques.
|
| 71 |
+
|
| 72 |
+
Normalization in deep networks. In addition to CN, different strategies have been proposed to normalize feature maps in a deep network, starting with the seminal work of Batch Normalization [22], which proposed to normalize the feature maps over a mini-batch. Layer Normalization [2] transposed this operation by looking at all channels for a single sample in the batch, whereas Group Normalization [51] applied it over subsets of channels. Further efforts have proposed to normalize the weights instead of the activations [41], or their eigenvalues [34]. The main use of all these normalization techniques is to stabilize the optimization process and speed up convergence. By contrast, Instance Normalization [46] proposed to normalize individual image samples for style transfer, and was improved upon in [21] by aligning the mean and standard deviation of content and style. Regardless of the specifics, all of these normalization techniques operate on the entire sample – in other words, they do not consider the presence of outliers or their statistics. While this is not critical in image-based pipelines, it can be extremely harmful for point clouds; see Fig. 1.
|
| 73 |
+
|
| 74 |
+
Attentional methods. The core idea behind attention mechanisms is to focus on the crucial parts of the input. There are different forms of attention, and they have been applied to a wide range of machine learning problems, from natural language processing to images. Vaswani et al. [48] proposed an attentional model for machine translation eschewing recurrent architectures. Luong et al. [32] blended two forms of attention on sequential inputs, demonstrating performance improvements in text translation. Xu et al. [54] showed how to employ soft and hard attention to gaze on salient objects and generate automated image captions. Local response normalization has been used to find salient responses in feature maps [24, 29], and can be interpreted as a form of lateral inhibition [19]. The use of attention in convolutional deep networks was pioneered by Spatial Transformer Networks [23], which introduced a differentiable sampler that allows for spatial manipulation of the image. In [53], attention is directly applied to the feature map, given by a PointNet-style network operating on point clouds. However, this strategy does not work as well as ours for wide-baseline stereo; see Section B in the supplementary material.
|
| 75 |
+
|
| 76 |
+
# 3. Attentive Context Normalization
|
| 77 |
+
|
| 78 |
+
Given a feature map $\mathbf{f} \in \mathbb{R}^{N \times C}$ , where $N$ is the number of features (or data points at layer zero), $C$ is the number of channels, and each row corresponds to a data point, we recall that Context Normalization [55] is a non-parametric operation that can be written as
|
| 79 |
+
|
| 80 |
+
$$
|
| 81 |
+
\mathcal{N}_{\mathrm{CN}}(\mathbf{f}) = \left(\mathbf{f} - \mu(\mathbf{f})\right) \oslash \sigma(\mathbf{f}), \tag{5}
|
| 82 |
+
$$
|
| 83 |
+
|
| 84 |
+
where $\mu (\mathbf{f}) = \mathbb{E}[\mathbf{f}]$ is the arithmetic mean, $\sigma (\mathbf{f}) = \sqrt{\mathbb{E}[(\mathbf{f} - \mathbb{E}[\mathbf{f}])^{\circ 2}]}$ is the standard deviation of
|
| 85 |
+
|
| 86 |
+
the features across $N$, and $\oslash$ denotes the element-wise division. Here we assume a single cloud, but generalizing to multiple clouds (i.e. a batch) is straightforward. Note that to preserve the properties of unstructured clouds, the information in the feature maps needs to be normalized in a permutation-equivariant way. We extend CN by introducing a weight vector $\mathbf{w} \in [0, 1]^N$, and indicate with $\mu_{\mathbf{w}}(\cdot)$ and $\sigma_{\mathbf{w}}(\cdot)$ the corresponding weighted mean and standard deviation. In contrast to Context Normalization, we compute the weights $\mathbf{w}$ with a parametric function $\mathcal{W}_{\omega}(\cdot)$ with trainable parameters $\omega$ that takes as input the feature map, and returns a unit-norm vector of weights:
|
| 87 |
+
|
| 88 |
+
$$
|
| 89 |
+
\mathbf{w} = \eta(\mathcal{W}_{\omega}(\mathbf{f})), \quad \eta(\mathbf{x}) = \mathbf{x} / \|\mathbf{x}\|_{1}. \tag{6}
|
| 90 |
+
$$
|
| 91 |
+
|
| 92 |
+
We then define Attentive Context Normalization as
|
| 93 |
+
|
| 94 |
+
$$
|
| 95 |
+
\mathcal{N}_{\mathrm{ACN}}(\mathbf{f}; \mathbf{w}) = \left(\mathbf{f} - \mu_{\mathbf{w}}(\mathbf{f})\right) \oslash \sigma_{\mathbf{w}}(\mathbf{f}). \tag{7}
|
| 96 |
+
$$
|
| 97 |
+
|
| 98 |
+
The purpose of the attention network $\mathcal{W}_{\omega}(\cdot)$ is to compute a weight function that focuses the normalization of the feature maps on a subset of the input features - the inliers. As a result, the network can learn to effectively cluster the features, and therefore separate inliers from outliers.
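
For reference, a minimal NumPy sketch of the normalization in (7), assuming the attention vector has already been produced and normalized as in (6):

```python
import numpy as np

def acn(f, w, eps=1e-8):
    """Attentive Context Normalization, Eq. (7).

    f : (N, C) feature map, one row per data point.
    w : (N,) non-negative attention weights summing to one.
    """
    mu = (w[:, None] * f).sum(axis=0)               # weighted mean mu_w(f)
    var = (w[:, None] * (f - mu) ** 2).sum(axis=0)  # weighted variance
    return (f - mu) / np.sqrt(var + eps)            # element-wise division
```

Setting $\mathbf{w}$ to uniform weights $1/N$ recovers vanilla CN in (5).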
|
| 99 |
+
|
| 100 |
+
There are multiple attention functions that we can design, and multiple ways to combine them into a single attention vector $\mathbf{w}$ . We will now describe those that we found effective for finding correspondences in wide-baseline stereo, and how to combine and supervise them effectively.
|
| 101 |
+
|
| 102 |
+
Generating attention. We leverage two different types of attention mechanisms, local and global:
|
| 103 |
+
|
| 104 |
+
$$
|
| 105 |
+
\mathbf{w}_{i}^{\text{local}} = \mathcal{W}_{\omega}^{\text{local}}(\mathbf{f}_{i}) = \operatorname{sigmoid}\left(\mathbf{W}\mathbf{f}_{i}^{\top} + \mathbf{b}\right), \tag{8}
|
| 106 |
+
$$
|
| 107 |
+
|
| 108 |
+
$$
|
| 109 |
+
\mathbf{w}_{i}^{\text{global}} = \mathcal{W}_{\omega}^{\text{global}}(\mathbf{f}_{i}) = \frac{\exp\left(\mathbf{W}\mathbf{f}_{i}^{\top} + \mathbf{b}\right)}{\sum_{j=1}^{N} \exp\left(\mathbf{W}\mathbf{f}_{j}^{\top} + \mathbf{b}\right)}, \tag{9}
|
| 110 |
+
$$
|
| 111 |
+
|
| 112 |
+
where $\mathbf{W}$ and $\mathbf{b}$ are the parameters of a perceptron, and $\mathbf{f}_i$ denotes the feature vector for data point $i$ - the $i$-th row of the feature map $\mathbf{f}$. Observe that the local attention mechanism (8) acts on each feature vector independently, whereas the global attention mechanism (9) relates the feature vector for each data point to the collection through softmax.
|
| 113 |
+
|
| 114 |
+
Blending attention. Note that the product does not change the scale of the normalization applied in (7). Therefore, to take into account multiple types of attention simultaneously, we simply merge them through element-wise multiplication. One could use a parametric form of attention blending instead; however, it is non-trivial to combine the weights in a permutation-equivariant way, and we found this simple strategy effective.
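
A minimal NumPy sketch of (8), (9), and this blending rule; each attention head is assumed here to be a single linear unit with its own parameters (`W` of shape `(C,)`, scalar `b`), which is our reading of the perceptron in the text.

```python
import numpy as np

def local_attention(f, W, b):
    """Eq. (8): an independent sigmoid score per data point."""
    return 1.0 / (1.0 + np.exp(-(f @ W + b)))

def global_attention(f, W, b):
    """Eq. (9): a softmax that relates each point to the whole cloud."""
    z = f @ W + b
    z = z - z.max()              # for numerical stability
    e = np.exp(z)
    return e / e.sum()

def blend(w_local, w_global):
    """Element-wise product of the attention maps, followed by the
    L1 normalization eta(.) of Eq. (6)."""
    w = w_local * w_global
    return w / w.sum()
```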
|
| 115 |
+
|
| 116 |
+

|
| 117 |
+
Figure 2. ACNe architecture – (Left) Our permutation-equivariant network receives an input tensor $\mathbf{P}$ of size $N\times D$, which is processed by a series of $K$ Attentive Residual Blocks (ARB). The output of the network is a tensor $\mathbf{O}$ of size $N\times C$, which is then converted to a representation appropriate for the task at hand. Note that the first perceptron $\mathcal{F}_{\varphi}^{\mathrm{in}}$ changes the dimensionality from $\mathbf{P}$ of size $N\times D$ (input dimensions) to features $\mathbf{f}$ of size $N\times C$. (Middle) Within each residual path of the ARB, we manipulate the feature map with perceptrons $\mathcal{F}_{\varphi}$ with parameters $\varphi$, followed by Attentive Context Normalization (ACN) – we repeat this structure twice. (Right) An ACN module computes local/global attention with two trainable networks, combines them via element-wise multiplication, and normalizes the feature maps with said weights – the $\mathcal{N}_{\mathrm{ACN}}$ block – followed by Group Normalization. Note that all features are processed in the same way, individually, and the ACN block is the only place where they interact with each other – this architecture guarantees permutation-equivariance.
|
| 118 |
+
|
| 119 |
+

|
| 120 |
+
|
| 121 |
+

|
| 122 |
+
|
| 123 |
+
Supervising attention. In some problems, the class of each data point is known a priori and explicit supervision can be performed. In this case, adding a supervised loss on the attention signals can be beneficial. For instance, when finding good correspondences for stereo, we can apply a binary cross-entropy loss, using the epipolar distance to generate labels for each putative correspondence, as in [55]. Our experiments in Section 6.4 show that while this type of supervision can provide a small boost in performance (1-2%), our approach performs nearly as well without it.
|
| 124 |
+
|
| 125 |
+
# 4. Network architecture and applications
|
| 126 |
+
|
| 127 |
+
Our network receives as input $\mathbf{P} \in \mathbb{R}^{N \times D}$ , the tensor representation of $\mathcal{P}$ , and produces an output tensor $\mathbf{O} \in \mathbb{R}^{N \times C}$ . Note that as $\mathcal{P}$ is unstructured, $\mathbf{O}$ must be equivariant with respect to permutations of the $N$ rows of $\mathbf{P}$ . This output tensor is then used in different ways according to the task at hand. We model our architecture after [55], which we refer to as Context Network (CNe). It features a series of residual blocks [20] with Context Normalization (CN). Our architecture, which we call Attentive Context Network, or ACNe, is pictured in Fig. 2. A key distinction is that within each normalization block (Fig. 2; right) we link the individual outputs of each perceptron $\mathcal{F}_{\varphi}$ to our ACN layer. We also replace the Batch Normalization layers [22] used in [55] with Group Normalization [51], as we found it performs better; see Section 6.4 for ablation tests.
|
| 128 |
+
|
| 129 |
+
We demonstrate that ACNe can be used to solve multiple applications, ranging from classical problems such as robust line fitting (Section 4.1) and point cloud classification on MNIST and ModelNet40 (Section 4.2), to robust camera pose estimation for wide-baseline stereo (Section 4.3).
|
| 130 |
+
|
| 131 |
+
# 4.1. Robust line fitting
|
| 132 |
+
|
| 133 |
+
We consider the problem of fitting a line to a collection of points $\mathbf{P} \in \mathbb{R}^{N \times 2}$ that is riddled with noise and outliers; see Fig. 1. This problem can be addressed via smooth (IRLS)
|
| 134 |
+
|
| 135 |
+
or combinatorial (RANSAC) optimization - both methods can be interpreted in terms of sparse optimization, such that inliers and outliers are clustered separately; see [7]. Let us parameterize a line as the locus of points $[x,y]$ such that $\pmb{\theta} \cdot [x,y,1] = 0$. We can then score each row of $\mathbf{P}$ (i.e. each 2D point) by passing the output tensor $\mathbf{O} = \mathrm{ACNe}(\mathbf{P})$ to an additional weight network - with local and global components - following (6), yielding weights $\mathbf{w} = \eta (\mathcal{W}_{\omega}(\mathbf{O}))$. Given $\mathbf{w}$, and expressing our points in homogeneous coordinates as $\bar{\mathbf{P}} = [\mathbf{P},\mathbf{1}] \in \mathbb{R}^{N \times 3}$, we can compute the weighted covariance matrix as $\mathbf{C_{w}}(\mathbf{P}) = \bar{\mathbf{P}}^{\top} \mathrm{diag}(\mathbf{w})^{2} \bar{\mathbf{P}} \in \mathbb{R}^{3 \times 3}$. Then, denoting by $\nu_0[\mathbf{C}]$ the eigenvector of $\mathbf{C}$ corresponding to its smallest eigenvalue, $\nu_0[\mathbf{C_w}(\mathbf{P})]$ is the estimated line equation that we seek. We therefore minimize the difference between this eigenvector and the ground truth, with additional guidance on $\mathbf{w}^{\mathrm{local}}$ to help convergence:
|
| 136 |
+
|
| 137 |
+
$$
|
| 138 |
+
\mathcal{L}(\boldsymbol{\omega}) = \alpha \min_{+/-} \left\{ \left\| \nu_{0}\left[\mathbf{C}_{\mathbf{w}}(\mathbf{P})\right] \pm \boldsymbol{\theta} \right\|_{2}^{2} \right\} + \beta \, \mathbb{E}\left[ H\left(\mathbf{y}, \mathbf{w}^{\text{local}}\right) \right], \tag{10}
|
| 139 |
+
$$
|
| 140 |
+
|
| 141 |
+
where $\mathbb{E}\left[H(\mathbf{a},\mathbf{b})\right]$ is the average binary cross-entropy between $\mathbf{a}$ and $\mathbf{b}$, $\mathbf{y}$ is the ground-truth inlier label, and the hyper-parameters $\alpha$ and $\beta$ control the influence of the two losses. The $\min_{+/-}$ accounts for the fact that $-\boldsymbol{\theta}$ and $\boldsymbol{\theta}$ describe the same line. A sketch of the estimation step follows.
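
A minimal NumPy sketch of the inference path just described; during training, the loss (10) is backpropagated through this eigendecomposition:

```python
import numpy as np

def fit_line_weighted(P, w):
    """Estimate theta with theta . [x, y, 1] = 0 from weighted points.

    P : (N, 2) input points, w : (N,) attention weights from the network.
    Returns the eigenvector of C_w(P) with the smallest eigenvalue,
    defined up to sign (hence the min over +/- in the loss).
    """
    P_bar = np.hstack([P, np.ones((len(P), 1))])   # homogeneous coordinates
    C = P_bar.T @ np.diag(w ** 2) @ P_bar          # weighted covariance, 3 x 3
    _, eigvecs = np.linalg.eigh(C)                 # eigenvalues in ascending order
    return eigvecs[:, 0]                           # nu_0[C_w(P)]
```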
|
| 142 |
+
|
| 143 |
+
# 4.2. Point cloud classification
|
| 144 |
+
|
| 145 |
+
We can also apply ACNe to classify entire point clouds, rather than reasoning about individual points. As in the previous application, we consider a set of 2D or 3D locations $\mathbf{P} \in \mathbb{R}^{N \times D}$ as input, where $D$ is the number of dimensions. To classify each point set, we transform the output tensor $\mathbf{O} = \mathrm{ACNe}(\mathbf{P})$ into a single vector $\mathbf{v} = \mu_{\mathbf{w}}(\mathbf{O})$ and associate it with a ground-truth one-hot vector $\mathbf{y}$ through softmax. Additional weight networks to generate $\mathbf{w}$ are trained for this task. The loss that we optimize is thus the cross entropy:
|
| 146 |
+
|
| 147 |
+
$$
|
| 148 |
+
\mathcal{L}(\boldsymbol{\omega}) = H\left(\mathbf{y}, \operatorname{softmax}(\mathbf{v})\right). \tag{11}
|
| 149 |
+
$$
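
As a reference for (11), a minimal NumPy sketch of the pooling and loss, assuming the last perceptron already maps the features to one channel per class:

```python
import numpy as np

def classification_loss(O, w, y, eps=1e-12):
    """Cross entropy of Eq. (11).

    O : (N, C) equivariant network output, with C = number of classes here.
    w : (N,) attention weights, y : (C,) one-hot ground-truth label.
    """
    v = (w[:, None] * O).sum(axis=0)     # v = mu_w(O): permutation-invariant
    v = v - v.max()                      # numerically stable softmax
    p = np.exp(v) / np.exp(v).sum()
    return -(y * np.log(p + eps)).sum()  # H(y, softmax(v))
```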
|
| 150 |
+
|
| 151 |
+

|
| 152 |
+
input
|
| 153 |
+
|
| 154 |
+

|
| 155 |
+
10%
|
| 156 |
+
Figure 3. Classification - We add salt-and-pepper noise to MNIST images, and then convert the digits to an unstructured point cloud. The $\%$ reports the outlier-to-inlier ratio.
|
| 157 |
+
|
| 158 |
+

|
| 159 |
+
20%
|
| 160 |
+
|
| 161 |
+

|
| 162 |
+
30%
|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
40%
|
| 166 |
+
|
| 167 |
+

|
| 168 |
+
50%
|
| 169 |
+
|
| 170 |
+

|
| 171 |
+
60%
|
| 172 |
+
|
| 173 |
+
# 4.3. Wide-baseline stereo
|
| 174 |
+
|
| 175 |
+
In stereo, we are given correspondences as input: $\mathbf{P} \in \mathbb{R}^{N \times 4}$, where $N$ is the number of correspondences and each row contains two pixel locations on different images. In order to remain comparable with traditional methods, we aim to solve for the Fundamental matrix instead of the Essential matrix, i.e., without assuming known camera intrinsics. Thus, differently from [55, 12, 59], we simply normalize the image coordinates by the image size. This makes our method more broadly applicable, and directly comparable with most robust estimation methods for stereo [16, 45, 9, 10, 4].
|
| 176 |
+
|
| 177 |
+
We obtain $\mathbf{w}$ from the output tensor $\mathbf{O} = \mathrm{ACNe}(\mathbf{P})$ via (6), as in Section 4.1. The weights $\mathbf{w}$ indicate which correspondences are considered to be inliers, as well as their relative importance. We then apply a weighted variant of the 8-point algorithm [18] to retrieve the Fundamental matrix $\hat{\mathbf{F}}$, which parameterizes the relative motion between the two cameras. To do so we adopt the differentiable, non-parametric form proposed by [55], and denote this operation as $\hat{\mathbf{F}} = g(\mathbf{P},\mathbf{w})$. We then train our network to regress the ground-truth Fundamental matrix, while also providing auxiliary guidance to $\mathbf{w}^{\mathrm{local}}$ – the final local attention used to construct the output of the network – with per-correspondence labels obtained by thresholding over the symmetric epipolar distance [18], as in [55]. In addition, we also perform auxiliary supervision on $\mathbf{w}_k^{\mathrm{local}}$ – the intermediate local attentions within the network – as discussed in Section 3. Note that this loss is not necessary, but it helps training and provides a small boost in performance; see Section 6.4. We do not supervise global attention and leave it for the network to learn. We therefore write:
|
| 178 |
+
|
| 179 |
+
$$
|
| 180 |
+
\mathcal{L}(\boldsymbol{\omega}) = \alpha \min_{+/-} \left\{ \left\| \hat{\mathbf{F}} \pm \mathbf{F}^{*} \right\|_{F}^{2} \right\} + \beta \, \mathbb{E}\left[ H\left(\mathbf{y}, \mathbf{w}^{\text{local}}\right) \right] + \gamma \, \mathbb{E}_{k}\left[ H\left(\mathbf{y}, \mathbf{w}_{k}^{\text{local}}\right) \right], \tag{12}
|
| 181 |
+
$$
|
| 182 |
+
|
| 183 |
+
where $\| \cdot \|_F$ is the Frobenius norm, $H$ is the binary cross entropy, and $\mathbf{y}$ denotes ground truth inlier labels. Again, the hyper-parameters $\alpha$ , $\beta$ , and $\gamma$ control the influence of each loss. Similarly to the line-fitting case, the $\min_{+/-}$ resolves the issue that $-\mathbf{F}^*$ and $\mathbf{F}^*$ express the same solution.
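
For intuition, a minimal NumPy sketch of a weighted 8-point solver; this is the textbook SVD formulation [18] rather than the exact differentiable variant of [55], with the rank-2 projection shown for completeness:

```python
import numpy as np

def weighted_eight_point(P, w):
    """Weighted 8-point estimate of the Fundamental matrix.

    P : (N, 4) correspondences (x1, y1, x2, y2), coordinates
        normalized by the image size; w : (N,) attention weights.
    """
    x1, y1, x2, y2 = P.T
    # Each row of A encodes the epipolar constraint p2^T F p1 = 0.
    A = np.stack([x2 * x1, x2 * y1, x2,
                  y2 * x1, y2 * y1, y2,
                  x1, y1, np.ones_like(x1)], axis=1)   # (N, 9)
    Aw = A * w[:, None]                                # weight the constraints
    _, _, Vt = np.linalg.svd(Aw)
    F = Vt[-1].reshape(3, 3)                           # null-space estimate
    # Enforce rank 2, as required for a valid Fundamental matrix.
    U, S, Vt = np.linalg.svd(F)
    S[-1] = 0.0
    return U @ np.diag(S) @ Vt
```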
|
| 184 |
+
|
| 185 |
+
# 5. Implementation details
|
| 186 |
+
|
| 187 |
+
We employ a K-layer structure (excluding the first linear layer, whose size changes according to the number of input channels) for ACNe, with K ARB units and two perceptron layers in each ARB. The number of layers K is set to 3 for 2D point cloud classification, 6 for robust line fitting, and 12 for stereo. For 3D point cloud classification, we add ACN normalization to an existing architecture. We also use 32 groups for Group Normalization, as suggested in [51]. Similarly to [55], we use $C = 128$ channels per perceptron.
|
| 188 |
+
|
| 189 |
+
Training setup. For all applications we use the ADAM optimizer [25] with default parameters and a learning rate of $10^{-3}$. Except for robust line fitting, we use a validation set to perform early stopping. For robust line fitting, the data is purely synthetic and thus effectively infinite, and we train for 50k iterations. For MNIST, we use 70k samples with an 8:1:1 split for training, validation and testing. For stereo, we use the splits from [58]. For the loss terms involving eigendecomposition (the terms multiplied by $\alpha$ in (10) and (12)), we use $\alpha = 0.1$, following [55]. All other loss terms have a weight of 1, that is, $\beta = 1$ and $\gamma = 1$. For stereo, we follow [55] and enable the term involving the Fundamental matrix - the first term in (12) - after 20k iterations.
|
| 190 |
+
|
| 191 |
+
Robust estimators for stereo inference. As a special case, we evaluate the possibility of applying standard robust estimators for outlier rejection, such as RANSAC, after training the model, to potentially maximize its performance, as previously done in [55, 12, 58]. To do so, we modify our architecture by changing the final layer to output only the local attention with the $\mathrm{ReLU} + \mathrm{Tanh}$ activation, as in Yi et al. [55]. We then simply threshold $\mathbf{w}$ at zero, select the data points that survive this process as inliers, and feed them to different RANSAC methods for further processing. We compare these results with those obtained directly from the weighted 8-point formulation.
|
| 192 |
+
|
| 193 |
+
# 6. Results
|
| 194 |
+
|
| 195 |
+
We first consider a toy example on fitting 2D lines with a large ratio of outliers. We then apply our method to point cloud classification, following [36, 37], which includes 2D for digit classification on MNIST and 3D for object classification on ModelNet40 [52]. These three experiments illustrate that our attentional method performs better than
|
| 196 |
+
|
| 197 |
+
<table><tr><td>Outlier ratio</td><td>60%</td><td>70%</td><td>80%</td><td>85%</td><td>90%</td></tr><tr><td>CNe [55]</td><td>.00019</td><td>.0038</td><td>.056</td><td>.162</td><td>.425</td></tr><tr><td>ACNe (Ours)</td><td>1e-6</td><td>.0008</td><td>.024</td><td>.130</td><td>.383</td></tr></table>
|
| 198 |
+
|
| 199 |
+
Table 1. Robust line fitting - Line fitting results over the test set in terms of the $\ell_2$ distance between ground-truth and the estimates.
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
vanilla Context Normalization in the presence of outliers. We then apply our solution to wide-baseline stereo, and demonstrate that this increase in performance holds on challenging real-world applications, and against state-of-the-art methods for robust pose estimation. Finally, we perform an ablation study and evaluate the effect of supervising the weights used for attention in stereo.
|
| 204 |
+
|
| 205 |
+
# 6.1. Robust line fitting - Fig. 1 and Table 1
|
| 206 |
+
|
| 207 |
+
To generate 2D points on a random line, as well as outliers, we first sample 2D points uniformly within the range $[-1, +1]$. We then select two points at random and fit a line that goes through them. Each point is then projected onto the line with probability equal to the desired inlier ratio, forming the inliers. We measure the error in terms of the $\ell_2$ distance between the estimated and ground-truth line parameters. The results are summarized in Table 1, with qualitative examples in Fig. 1. ACNe consistently outperforms CNe [55]. Both methods break down at an $85-90\%$ outlier ratio, but the performance of ACNe degrades more gracefully. As illustrated in Fig. 1, our method learns to progressively focus on the inliers throughout the different layers of the network and weeds out the outliers. A sketch of this data generation protocol follows.
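
This sketch reflects our reading of the protocol above; the inlier noise mentioned in Section 4.1 is omitted here for brevity.

```python
import numpy as np

def make_line_data(n=1000, inlier_ratio=0.2, rng=None):
    """Synthetic 2D line-fitting data: points, inlier labels, and theta."""
    rng = rng or np.random.default_rng()
    P = rng.uniform(-1.0, 1.0, (n, 2))
    a, b = P[rng.choice(n, 2, replace=False)]    # line through two points
    d = (b - a) / np.linalg.norm(b - a)          # unit direction of the line
    inl = rng.uniform(0.0, 1.0, n) < inlier_ratio
    # Project the selected points onto the line to turn them into inliers.
    P[inl] = a + ((P[inl] - a) @ d)[:, None] * d
    # theta . [x, y, 1] = 0: the normal is d rotated by 90 degrees.
    nrm = np.array([-d[1], d[0]])
    theta = np.array([nrm[0], nrm[1], -nrm @ a])
    theta /= np.linalg.norm(theta)
    return P, inl, theta
```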
|
| 208 |
+
|
| 209 |
+
# 6.2. Classifying digits - Fig. 3 and Table 2
|
| 210 |
+
|
| 211 |
+
We evaluate our approach on handwritten digit classification on MNIST, which consists of $28 \times 28$ grayscale images. We create a point cloud from these images following the procedure of [36]: we threshold each image at 128 and use the coordinates - normalized to a unit bounding box - of the surviving pixel locations as data samples. We subsample 512 points with replacement, in order to have the same number of points for all training examples. We also add small Gaussian noise with a standard deviation of 0.01 to the pixel coordinates after sampling, following [36]. Outliers are generated by sampling from a uniform random distribution. We compare our method against vanilla PointNet [36] and CNe [55]. For PointNet, we re-implemented their method under our framework to have an identical training setup. A sketch of this conversion is given below.
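
Whether outliers replace samples or are appended on top is not specified in the text, so appending is an assumption in this sketch.

```python
import numpy as np

def mnist_to_cloud(img, n_points=512, outlier_ratio=0.0, rng=None):
    """Turn a 28x28 grayscale digit into an unstructured 2D point cloud."""
    rng = rng or np.random.default_rng()
    ys, xs = np.nonzero(img >= 128)                  # threshold at 128
    pts = np.stack([xs, ys], axis=1) / 27.0          # normalize to the unit box
    idx = rng.choice(len(pts), n_points, replace=True)   # fixed-size subsample
    pts = pts[idx] + rng.normal(0.0, 0.01, (n_points, 2))  # small jitter
    n_out = int(outlier_ratio * n_points)            # uniform random outliers
    return np.concatenate([pts, rng.uniform(0.0, 1.0, (n_out, 2))])
```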
|
| 212 |
+
|
| 213 |
+
<table><tr><td>Outlier ratio</td><td>0%</td><td>10%</td><td>20%</td><td>30%</td><td>40%</td><td>50%</td><td>60%</td></tr><tr><td>PointNet [36]</td><td>98.1</td><td>95.1</td><td>93.2</td><td>79.5</td><td>67.7</td><td>70.0</td><td>54.8</td></tr><tr><td>CNe [55]</td><td>98.0</td><td>95.8</td><td>94.0</td><td>91.0</td><td>90.1</td><td>87.7</td><td>87.2</td></tr><tr><td>ACNe (Ours)</td><td>98.3</td><td>97.2</td><td>96.5</td><td>95.3</td><td>94.7</td><td>94.3</td><td>93.7</td></tr></table>

Table 2. 2D Point cloud classification - Classification accuracy on MNIST, under different outlier ratios (%). Our method performs best in all cases, and the gap becomes wider with more outliers.
|
| 214 |
+
|
| 215 |
+
<table><tr><td>Outlier ratio</td><td>0%</td><td>10%</td><td>20%</td><td>30%</td><td>40%</td><td>50%</td></tr><tr><td>PointNet</td><td>85.8</td><td>81.7</td><td>81.7</td><td>80.1</td><td>78.2</td><td>78.5</td></tr><tr><td>PointNet w/ CN</td><td>87.2</td><td>84.3</td><td>84.5</td><td>83.4</td><td>81.7</td><td>81.6</td></tr><tr><td>PointNet w/ ACN</td><td>87.7</td><td>84.6</td><td>85.0</td><td>84.6</td><td>83.3</td><td>84.2</td></tr></table>
|
| 216 |
+
|
| 217 |
+
Table 3. 3D Point cloud classification – We replicate the 3D point cloud classification experiment on ModelNet40 from [36], with vanilla PointNet, and add outliers as well as Gaussian noise. Our approach performs best with and without outliers.
|
| 218 |
+
|
| 219 |
+
Table 2 summarizes the results in terms of classification accuracy. Our method performs best, with the gap widening as the outlier ratio increases – while CNe shows some robustness to noise, PointNet quickly breaks down. Note that the results for PointNet are slightly different from the ones reported in [36], as we use a validation split to perform early stopping. In addition, to reduce randomness, we train 10 different models and report the average results.
|
| 220 |
+
|
| 221 |
+
# 6.3. Classifying 3D objects - Table 3
|
| 222 |
+
|
| 223 |
+
We apply our method to the problem of 3D object (point cloud) classification. We use the ModelNet40 dataset [52], and compare with PointNet [37]. Similarly to the MNIST case, we contaminate the dataset with outliers to test the robustness of each method. Specifically, we add a predetermined ratio of outliers to the point clouds, sampled uniformly within the range $[-1, 1]$ . We also add small Gaussian perturbations to the locations of the points, with a standard deviation of 0.01. We then sample 1024 points from the point cloud to perform classification. Again, to simply test if ACN can improve existing pipelines, we plug our normalization into the vanilla PointNet architecture. Note that the original PointNet includes an affine estimation step which provides a small performance boost – we omit it from our implementation, in order to isolate the architectural differences between the methods. We report the results in Table 3. Our method performs best, with the gap becoming wider as outliers become prevalent.
|
| 224 |
+
|
| 225 |
+
# 6.4. Wide-baseline stereo - Fig. 4 and Table 4
|
| 226 |
+
|
| 227 |
+
Wide-baseline stereo is an extremely challenging problem, due to the large number of variables to account for - viewpoint, scale, illumination, occlusions, and properties of the imaging device - see Fig. 4 for some examples. We benchmark our approach on a real-world dataset against multiple state-of-the-art baselines, following the data from [58] and the protocols provided by [55]. The ground-truth camera poses are obtained from Structure-from-Motion with VisualSfM [50], applied to large collections of publicly available, challenging photo-tourism images.
|
| 228 |
+
|
| 229 |
+
We evaluate performance in terms of the reconstructed poses. Since the stereo matching problem is defined only up to a scale factor [18], it is not possible to compute absolute (metric) errors for translation. Instead, we follow
|
| 230 |
+
|
| 231 |
+

|
| 232 |
+
Figure 4. Wide-baseline stereo - We show the results of different matching algorithms on the dataset of [55]. We draw the inliers produced by them, in green if the match is below the epipolar distance threshold (in red otherwise). Note that this may include some false positives, as epipolar constraints map points to lines - perfect ground truth would require dense pixel-to-pixel correspondences.
|
| 233 |
+
|
| 234 |
+
the methodology of [55] and measure the angular error between the ground-truth and estimated rotation and translation between the two cameras, combining the two by taking their maximum. We then evaluate the accuracy over all image pairs at multiple error thresholds, accumulate it up to a limit (either $10^{\circ}$ or $20^{\circ}$), and summarize performance by its mean, which we call the mean Average Precision (mAP); see [55]. This means that methods that perform better at lower error thresholds are rewarded. We use their data mostly as is, using the pre-extracted correspondences and splits from OANet [58], but adapt it to the Fundamental matrix problem. In contrast to previous works [55, 58], which report results on the scene the models were trained on, we focus on unknown scenes, in order to assess each method's actual performance.
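
A sketch of this metric under our reading of the protocol; the 1-degree threshold spacing is an assumption, as the exact binning of [55] is not restated here.

```python
import numpy as np

def angular_error_deg(v_est, v_gt):
    """Angle between two vectors, used here for both the rotation and
    the translation component of the pose error."""
    c = v_est @ v_gt / (np.linalg.norm(v_est) * np.linalg.norm(v_gt))
    return np.degrees(np.arccos(np.clip(c, -1.0, 1.0)))

def mean_average_precision(errors_deg, max_thresh=20.0, step=1.0):
    """Accuracy accumulated over error thresholds up to max_thresh, then
    averaged; this rewards methods that do well at tight thresholds."""
    errors = np.asarray(errors_deg)
    threshs = np.arange(step, max_thresh + step, step)
    return float(np.mean([(errors <= t).mean() for t in threshs]))
```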
|
| 235 |
+
|
| 236 |
+
As we discussed in Section 4.3, both CNe [55] and OANet [58] assume known camera intrinsics and estimate the Essential matrix, instead of the Fundamental matrix - this is a significantly easier problem, as the number of free parameters drops from 7 to 5. However, most research papers on this topic focus on estimating the Fundamental matrix [9, 10, 38, 3, 4], which is why we focus on this problem instead. For completeness, we also report results for the Essential matrix in the supplementary appendix, for which we also achieve state-of-the-art results.
|
| 237 |
+
|
| 238 |
+
In more detail, given an image pair, we extract 2k keypoints for each image with SIFT [31]. Matches are then formed from one image to the other, in both directions. As is typical for image matching, we then filter out non-discriminative correspondences via a bi-directional check, enforcing one-to-one matching. For RANSAC variants, we found it critical to further apply Lowe's ratio test [31], with a ratio threshold of 0.8; without it, RANSAC variants provide worse results. We do not apply this test for learned methods, as it throws out too many correspondences for learned methods to bring any benefit. Also, when training learned methods, we train without the bi-directional check, in order to expose the network to as many correspondences as possible. A sketch of this matching pipeline is given below.
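
`match_descriptors` here is a hypothetical helper, written as one plausible implementation of the bi-directional check and ratio test described above (operating on squared L2 descriptor distances).

```python
import numpy as np

def match_descriptors(d1, d2, ratio=0.8, mutual=True):
    """Nearest-neighbor matching with a bi-directional check and Lowe's
    ratio test. d1 : (N1, D), d2 : (N2, D) descriptors.
    Returns (i, j) index pairs of surviving matches."""
    # Squared L2 distances without materializing an (N1, N2, D) array.
    dist = ((d1 ** 2).sum(axis=1)[:, None]
            + (d2 ** 2).sum(axis=1)[None] - 2.0 * d1 @ d2.T)
    nn12 = dist.argmin(axis=1)                    # best match 1 -> 2
    keep = np.ones(len(d1), dtype=bool)
    if mutual:                                    # enforce one-to-one matching
        nn21 = dist.argmin(axis=0)
        keep &= nn21[nn12] == np.arange(len(d1))
    if ratio is not None:                         # Lowe's ratio test at 0.8
        two_best = np.partition(dist, 1, axis=1)[:, :2]
        keep &= two_best[:, 0] < (ratio ** 2) * two_best[:, 1]
    return np.stack([np.nonzero(keep)[0], nn12[keep]], axis=1)
```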
|
| 239 |
+
|
| 240 |
+
We consider the following methods: LMedS [40], RANSAC [16, 9], MLESAC [45], DegenSAC [10], MAGSAC [4], CNe [55], DFE [39], OANet [58], and ACNe (ours). We consider the pose estimated with the weighted 8-point algorithm directly, as well as those combined with a robust outlier rejection method, as outlined in Section 5.
|
| 241 |
+
|
| 242 |
+
Quantitative results. We report quantitative results in Table 4, for two different error thresholds $(10^{\circ} / 20^{\circ})$ . We make three fundamental observations:
|
| 243 |
+
|
| 244 |
+
(1) Our method consistently outperforms all of the baselines, including CNe and OANet. The difference in performance between ACNe and its closest competitor, OANet, is $14.1/9.8\%$ (relative) for Outdoors and $8.6/9.2\%$ (relative) for Indoors, when used without any additional post-processing. The gap for Outdoors is reduced to $1\%$ when they are combined with MAGSAC, but ACNe still outperforms OANet. For Indoors, we observe a drop in performance for both OANet and ACNe when combining them with RANSAC or MAGSAC. The margin between learned and traditional methods is significant, with ACNe performing $30.1/39.6\%$ better (relative) on Outdoors and $45.5/47.1\%$ better (relative) on Indoors, compared to the best-performing traditional baseline – including a very recent method, MAGSAC.
|
| 245 |
+
(2) Different from the findings of [55], we observe that RANSAC variants may harm performance, particularly with ACNe. This is because through its global attention - $\mathbf{w}^{\mathrm{global}}$ - ACNe can infer the relative importance of each correspondence, which is not easily taken into account when passing samples to a robust estimator. In this manner, ACNe goes beyond simple outlier rejection. The best performance is typically achieved by using ACNe in its pure form, directly feeding its weights to the weighted 8-point algorithm. Given that all our experiments are on unseen sequences, this further shows that ACNe generalizes very well, even without being followed by an additional robust estimator.
|
| 246 |
+
(3) Contrary to the results of Yi et al. [55] and Zhang et al. [58], we find that traditional baselines perform better than reported in either work. This is because their
|
| 247 |
+
|
| 248 |
+
<table><tr><td></td><td>Method</td><td>Outdoors</td><td>Indoors</td></tr><tr><td rowspan="5">Traditional</td><td>LMedS</td><td>.296/.383</td><td>.142/.235</td></tr><tr><td>RANSAC</td><td>.356/.437</td><td>.172/.272</td></tr><tr><td>MLESAC</td><td>.148/.216</td><td>.135/.230</td></tr><tr><td>DegenSAC</td><td>.328/.394</td><td>.191/.291</td></tr><tr><td>MAGSAC</td><td>.385/.457</td><td>.185/.282</td></tr><tr><td rowspan="9">Learned</td><td>CNe (weighted-8pt)</td><td>.323/.469</td><td>.189/.331</td></tr><tr><td>CNe+RANSAC</td><td>.449/.554</td><td>.201/.315</td></tr><tr><td>CNe+MAGSAC</td><td>.500/.598</td><td>.213/.326</td></tr><tr><td>DFE (weighted-8pt)</td><td>.319/.470</td><td>.167/.294</td></tr><tr><td>DFE+RANSAC</td><td>.414/.508</td><td>.193/.303</td></tr><tr><td>DFE+MAGSAC</td><td>.452/.541</td><td>.211/.320</td></tr><tr><td>OANet (weighted-8pt)</td><td>.439/.581</td><td>.256/.392</td></tr><tr><td>OANet+RANSAC</td><td>.482/.592</td><td>.211/.331</td></tr><tr><td>OANet+MAGSAC</td><td>.514/.615</td><td>.230/.346</td></tr><tr><td rowspan="3">Ours</td><td>ACNe (weighted-8pt)</td><td>.501/.638</td><td>.278/.428</td></tr><tr><td>ACNe+RANSAC</td><td>.478/.590</td><td>.209/.329</td></tr><tr><td>ACNe+MAGSAC</td><td>.518/.621</td><td>.226/.343</td></tr></table>
|
| 249 |
+
|
| 250 |
+
experimental setup considered neither Lowe's ratio test nor the bi-directional check. Without these, the performance of traditional baselines drops drastically – RANSAC and MAGSAC drop by $79.2/73.0\%$ and $92.0/85.1\%$ in relative performance, respectively, for Outdoors, and by $66.9/59.2\%$ and $82.2/74.1\%$ for Indoors.
|
| 251 |
+
|
| 252 |
+
Ablation study – Table 5. We perform an ablation study to evaluate the effect of the different types of attention, as well as the supervision on the local component of the attentive mechanism. We also compare with CNe, as its architecture is the most similar to ours. We use the train and validation splits for the Saint Peter's Square sequence for this study, as it is the primary sequence used for training in [55] and has many images within the set. (1) We confirm that CNe [55] performs better with Batch Normalization (BN) [22] than with Group Normalization (GN) [51] – we use GN for ACNe, as it seems to perform marginally better with our attention mechanism. (2) We observe that our attentive mechanisms allow ACNe to outperform CNe, and that their combination outperforms their separate use. (3) Applying supervision on the weights further boosts performance.
|
| 253 |
+
|
| 254 |
+
With learned features – Table 6. Finally, we report that our method also works well with two state-of-the-art, learned local feature methods – SuperPoint [14] and LF-Net [35]. They are learned end-to-end – their characteristics are thus different from those of SIFT keypoints. We test again on Saint Peter's Square, as our primary focus is to show that it is possible to use other feature types. In Table 6 we report that
|
| 255 |
+
|
| 256 |
+
Table 4. Pose estimation accuracy - mAP at $10^{\circ}/20^{\circ}$ error threshold. Similarly to [55], we consider multiple baselines, as well as pairing different methods with state-of-the-art RANSAC variants. Our method consistently outperforms all others by a significant margin, in some cases even without an additional robust estimator.
|
| 257 |
+
|
| 258 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="2">CNe [55]</td><td colspan="4">ACNe (Ours)</td></tr><tr><td>w/ BN</td><td>w/ GN</td><td>L</td><td>G</td><td>L+G</td><td>L+G+S</td></tr><tr><td>Weighted-8pt</td><td>.435</td><td>.414</td><td>.531</td><td>.593</td><td>.597</td><td>.602</td></tr></table>
|
| 259 |
+
|
| 260 |
+
Table 5. Ablation study - mAP at $20^{\circ}$ with different CNe [55] and ACNe (ours) variants on stereo. The labels indicate: $L$ - local attention; $G$ - global attention; $S$ - local attention supervision.
|
| 261 |
+
|
| 262 |
+
<table><tr><td></td><td>SIFT</td><td>SuperPoint</td><td>LF-Net</td></tr><tr><td>MAGSAC (w/o ratio test)</td><td>.146</td><td>.205</td><td>.134</td></tr><tr><td>MAGSAC</td><td>.264</td><td>.230</td><td>.157</td></tr><tr><td>OANet (weighted 8pt)</td><td>.488</td><td>.547</td><td>.543</td></tr><tr><td>OANet+MAGSAC</td><td>.479</td><td>.442</td><td>.452</td></tr><tr><td>ACNe (weighted 8pt)</td><td>.602</td><td>.637</td><td>.619</td></tr></table>
|
| 263 |
+
|
| 264 |
+
Table 6. With learned local features - mAP at $20^{\circ}$ with learned local features and different methods. Our method outperforms other methods and performs best with SuperPoint [14].
|
| 265 |
+
|
| 266 |
+
both methods improve performance over SIFT with OANet and ACNe, but perform worse with the ratio test and MAGSAC. It is interesting that SuperPoint, without the ratio test, performs better than SIFT with MAGSAC, yet the order is reversed when the ratio test is introduced, highlighting its importance. Regardless of feature type, we demonstrate that our approach provides improved performance over other methods, and that it pairs best with SuperPoint.
|
| 267 |
+
|
| 268 |
+
# 7. Conclusion
|
| 269 |
+
|
| 270 |
+
We have proposed Attentive Context Normalization (ACN), and used it to build Attentive Context Networks (ACNe) to solve problems on permutation-equivariant data. Our solution is inspired by IRLS, where one iteratively re-weights the importance of each sample via a soft inlier/outlier assignment. We demonstrated that by learning both local and global attention we are able to outperform state-of-the-art solutions on line fitting, classification of point clouds in 2D (digits) and 3D (objects), and challenging wide-baseline stereo problems. Notably, our method thrives under large outlier ratios. As a future research direction, we consider incorporating ACN into general normalization techniques for deep learning. We believe that this is an interesting direction to pursue, as all existing techniques make use of statistical moments.
|
| 271 |
+
|
| 272 |
+
# Acknowledgements
|
| 273 |
+
|
| 274 |
+
This work was supported by the Natural Sciences and Engineering Research Council of Canada (NSERC) Discovery Grant, NSERC Collaborative Research and Development Grant (Google), and by Compute Canada.
|
| 275 |
+
|
| 276 |
+
# References
|
| 277 |
+
|
| 278 |
+
[1] Sameer Agarwal, Noah Snavely, Ian Simon, Steven M. Seitz, and Richard Szeliski. Building Rome in One Day. In Int. Conf. on Comput. Vis., 2009. 1
|
| 279 |
+
[2] Jimmy L. Ba, Jamie R. Kiros, and Geoffrey E. Hinton. Layer Normalization. arXiv Preprint, 2016. 1, 2, 3
|
| 280 |
+
[3] Daniel Barath and Jiří Matas. Graph-cut RANSAC. In Conf. on Comput. Vis. Pattern Recognit., 2018. 7
|
| 281 |
+
[4] Daniel Barath, Jana Noskova, and Jiri Matas. MAGSAC: Marginalizing Sample Consensus. In Conf. on Comput. Vis. Pattern Recognit., 2019. 5, 7
|
| 282 |
+
[5] JiaWang Bian, Wen-Yan Lin, Yasuyuki Matsushita, Sai-Kit Yeung, Tan-Dat Nguyen, and Ming-Ming Cheng. GMS: Grid-Based Motion Statistics for Fast, Ultra-Robust Feature Correspondence. In Conf. on Comput. Vis. Pattern Recognit., 2017. 2
|
| 283 |
+
[6] Sofien Bouaziz, Andrea Tagliasacchi, Hao Li, and Mark Pauly. Modern Techniques and Applications for Real-Time Non-rigid Registration. In SIGGRAPH Asia (Technical Course Notes), 2016. 1
|
| 284 |
+
[7] Sofien Bouaziz, Andrea Tagliasacchi, and Mark Pauly. Sparse Iterative Closest Point. In Comput. Graphics Forum, 2013. 4
|
| 285 |
+
[8] Rick Chartrand and Wotao Yin. Iteratively Reweighted Algorithms for Compressive Sensing. In Int. Conf. on Acoustics, Speech, Signal Process., 2008. 2
|
| 286 |
+
[9] Ondrej Chum, Jiří Matas, and Josef Kittler. Locally Optimized RANSAC. In Joint Pattern Recognition Symposium, 2003. 5, 7
|
| 287 |
+
[10] Ondrej Chum, Tomas Werner, and Jiri Matas. Two-view Geometry Estimation Unaffected by a Dominant Plane. In Conf. on Comput. Vis. Pattern Recognit., 2005. 5, 7
|
| 288 |
+
[11] Andrew Cotter, Maya Gupta, Heinrich Jiang, Erez Louidor, James Muller, Tamann Narayan, Serena Wang, and Tao Zhu. Shape constraints for set functions. In Int. Conf. on Mach. Learn., pages 1388-1396, 2019. 2
|
| 289 |
+
[12] Zheng Dang, Kwang Moo Yi, Yinlin Hu, Fei Wang, Pascal Fua, and Mathieu Salzmann. Eigendecomposition-Free Training of Deep Networks with Zero Eigenvalue-Based Losses. In European Conf. on Comput. Vis., 2018. 1, 2, 5
|
| 290 |
+
[13] Michael Defferrard, Xavier Bresson, and Pierre Vandergheynst. Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering. In Adv. Neural Inf. Process. Syst., 2016. 2
|
| 291 |
+
[14] Daniel Detone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-Supervised Interest Point Detection and Description. CVPR Workshop on Deep Learning for Visual SLAM, 2018. 8
|
| 292 |
+
[15] Mihai Dusmanu, Ignacio Rocco, Tomas Pajdla, Marc Pollefeys, Josef Sivic, Akihiko Torii, and Torsten Sattler. D2-Net: A Trainable CNN for Joint Detection and Description of Local Features. In Conf. on Comput. Vis. Pattern Recognit., 2019. 2, 11
|
| 293 |
+
[16] Martin A. Fischler and Robert C. Bolles. Random Sample Consensus: A Paradigm for Model Fitting with Applications to Image Analysis and Automated Cartography. Communications ACM, 24(6):381-395, 1981. 1, 2, 5, 7
|
| 294 |
+
|
| 295 |
+
[17] John Fox. An R and S-Plus Companion to Applied Regression. Sage, 2002. 2
|
| 296 |
+
[18] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, 2000. 5, 6
|
| 297 |
+
[19] H. K. Hartline, Henry G. Wagner, and Floyd Ratliff. Inhibition in the Eye of Limulus. Journal of General Physiology, 1956. 3
|
| 298 |
+
[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep Residual Learning for Image Recognition. In Conf. on Comput. Vis. Pattern Recognit., 2016. 4
|
| 299 |
+
[21] Xun Huang and Serge Belongie. Arbitrary Style Transfer in Real-time with Adaptive Instance Normalization. In Int. Conf. on Comput. Vis., 2017. 3
|
| 300 |
+
[22] Sergey Ioffe and Christian Szegedy. Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. In Int. Conf. on Mach. Learn., 2015. 1, 2, 3, 4, 8
|
| 301 |
+
[23] Max Jaderberg, Karen Simonyan, Andrew Zisserman, and Koray Kavukcuoglu. Spatial Transformer Networks. In Adv. Neural Inf. Process. Syst., 2015. 3
|
| 302 |
+
[24] Kevin Jarrett, Koray Kavukcuoglu, Marc A. Ranzato, and Yann LeCun. What is the Best Multi-Stage Architecture for Object Recognition? In Int. Conf. on Comput. Vis., 2009. 3
|
| 303 |
+
[25] Diederik P. Kingma and Jimmy Ba. Adam: A Method for Stochastic Optimisation. In Int. Conf. on Learn. Representations, 2015. 5
|
| 304 |
+
[26] Thomas N. Kipf and Max Welling. Semi-Supervised Classification with Graph Convolutional Networks. Int. Conf. on Learn. Representations, 2017. 2
|
| 305 |
+
[27] Florian Kluger, Eric Brachmann, Hanno Ackermann, Carsten Rother, Michael Ying Yang, and Bodo Rosenhahn. CON-SAC: Robust Multi-Model Fitting by Conditional Sample Consensus. CVPR, 2020. 1
|
| 306 |
+
[28] Johannes Kopf, Michael F Cohen, and Richard Szeliski. First-Person Hyper-Lapse Videos. ACM Trans. on Graphics, 33(4):78, 2014. 1
|
| 307 |
+
[29] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E. Hinton. ImageNet Classification with Deep Convolutional Neural Networks. In Adv. Neural Inf. Process. Syst., 2012. 3
|
| 308 |
+
[30] Xinhai Liu, Zhizhong Han, Yu-Shen Liu, and Matthias Zwicker. Point2Sequence: Learning the Shape Representation of 3D Point Clouds with an Attention-Based Sequence to Sequence Network. Amer. Assoc. for Artif. Intell. Conf., 2019. 2
|
| 309 |
+
[31] David G. Lowe. Distinctive Image Features from Scale-Invariant Keypoints. Int. J. Comput. Vis., 60(2):91-110, 2004. 1, 7
|
| 310 |
+
[32] Minh-Thang Luong, Hieu Pham, and Christopher D. Manning. Effective Approaches to Attention-based Neural Machine Translation. Empirical Methods in Nat. Language Process., 2015. 3
|
| 311 |
+
[33] Muhammad J Mirza and Kim L Boyer. Performance Evaluation of a Class of M-estimators for Surface Parameter Estimation in Noisy Range Data. IEEE Transactions on Robotics and Automation, 9(1):75–85, 1993. 2
|
| 312 |
+
|
| 313 |
+
[34] Takeru Miyato, Toshiki Kataoka, Masanori Koyama, and Yuichi Yoshida. Spectral Normalization for Generative Adversarial Networks. In Int. Conf. on Learn. Representations, 2018. 3
|
| 314 |
+
[35] Yuki Ono, Eduard Trulls, Pascal Fua, and Kwang Moo Yi. LF-Net: Learning Local Features from Images. In Adv. Neural Inf. Process. Syst., 2018. 8
|
| 315 |
+
[36] Charles R. Qi, Hao Su, Kaichun Mo, and Leonidas J. Guibas. PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation. In Conf. on Comput. Vis. Pattern Recognit., 2017. 1, 2, 5, 6
|
| 316 |
+
[37] Charles R. Qi, Li Yi, Hao Su, and Leonidas J. Guibas. PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space. In Adv. Neural Inf. Process. Syst., 2017. 1, 2, 5, 6
|
| 317 |
+
[38] Rahul Raguram, Ondrej Chum, Marc Pollefeys, Jiri Matas, and Jan-Michael Frahm. USAC: a Universal Framework for Random Sample Consensus. IEEE Trans. on Pattern Anal. Mach. Intell., 35(8):2022-2038, 2012. 7
|
| 318 |
+
[39] René Ranftl and Vladlen Koltun. Deep Fundamental Matrix Estimation. In European Conf. on Comput. Vis., 2018. 1, 2, 7
|
| 319 |
+
[40] Peter J. Rousseeuw. Least Median of Squares Regression. Journal of the American Statistical Association, 1984. 7
|
| 320 |
+
[41] Tim Salimans and Diederik P. Kingma. Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks. In Adv. Neural Inf. Process. Syst., 2016. 3
|
| 321 |
+
[42] Yiru Shen, Chen Feng, Yaoqing Yang, and Dong Tian. Mining Point Cloud Local Structures by Kernel Correlation and Graph Pooling. In Conf. on Comput. Vis. Pattern Recognit., pages 4548-4557, 2018. 2
|
| 322 |
+
[43] Noah Snavely, Rahul Garg, Steven M Seitz, and Richard Szeliski. Finding Paths Through the World's Photos. ACM Trans. on Graphics, 2008. 1
|
| 323 |
+
[44] Maxim Tatarchenko, Jaesik Park, Vladlen Koltun, and Qian-Yi Zhou. Tangent Convolutions for Dense Prediction in 3D. In Conf. on Comput. Vis. Pattern Recognit., 2018. 2
|
| 324 |
+
[45] Philip H.S. Torr and Andrew Zisserman. MLESAC: A New Robust Estimator with Application to Estimating Image Geometry. Comput. Vis. Image Understanding, 78:138-156, 2000. 2, 5, 7
|
| 325 |
+
[46] Dmitry Ulyanov, Andrea Vedaldi, and Victor Lempitsky. Instance Normalization: The Missing Ingredient for Fast Stylization. arXiv Preprint, 2016. 1, 2, 3
|
| 326 |
+
[47] Benjamin Ummenhofer, Huizhong Zhou, Jonas Uhrig, Nikolaus Mayer, Eddy Ilg, Alexey Dosovitskiy, and Thomas Brox. DeMoN: Depth and Motion Network for Learning Monocular Stereo. In Conf. on Comput. Vis. Pattern Recognit., 2017. 1
|
| 327 |
+
[48] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention Is All You Need. In Adv. Neural Inf. Process. Syst., 2017. 3
|
| 330 |
+
[49] Fei Wang, Mengqing Jiang, Chen Qian, Shuo Yang, Cheng Li, Honggang Zhang, Xiaogang Wang, and Xiaoou Tang. Residual Attention Network for Image Classification. In Conf. on Comput. Vis. Pattern Recognit., 2017. 2, 11
|
| 331 |
+
[50] Changchang Wu. Towards Linear-Time Incremental Structure from Motion. In 3DV, 2013. 6
|
| 332 |
+
[51] Yuxin Wu and Kaiming He. Group Normalization. In European Conf. on Comput. Vis., 2018. 1, 2, 3, 4, 5, 8
|
| 333 |
+
[52] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaoou Tang, and Jianxiong Xiao. 3D ShapeNets: A Deep Representation for Volumetric Shapes. In Conf. on Comput. Vis. Pattern Recognit., 2015. 5, 6
|
| 334 |
+
[53] Saining Xie, Sainan Liu, Zeyu Chen, and Zhuowen Tu. Attentional ShapeContextNet for Point Cloud Recognition. In Conf. on Comput. Vis. Pattern Recognit., 2018. 3
|
| 335 |
+
[54] Kelvin Xu, Jimmy Ba, Ryan Kiros, Kyunghyun Cho, Aaron Courville, Ruslan Salakhutdinov, Richard Zemel, and Yoshua Bengio. Show, Attend and Tell: Neural Image Caption Generation with Visual Attention. Int. Conf. on Mach. Learn., 2015. 3
|
| 336 |
+
[55] Kwang Moo Yi, Eduard Trulls, Yuki Ono, Vincent Lepetit, Mathieu Salzmann, and Pascal Fua. Learning to Find Good Correspondences. In Conf. on Comput. Vis. Pattern Recognit., 2018. 1, 2, 3, 4, 5, 6, 7, 8, 11, 12
|
| 337 |
+
[56] Manzil Zaheer, Satwik Kottur, Siamak Ravanbakhsh, Barnabas Poczos, Ruslan R Salakhutdinov, and Alexander J Smola. Deep Sets. In Adv. Neural Inf. Process. Syst., 2017. 2
|
| 338 |
+
[57] Amir R. Zamir, Tilman Wekel, Pulkit Argrawal, Colin Weil, Jitendra Malik, and Silvio Savarese. Generic 3D Representation via Pose Estimation and Matching. In European Conf. on Comput. Vis., 2016. 1
|
| 339 |
+
[58] Jiahui Zhang, Dawei Sun, Zixin Luo, Anbang Yao, Lei Zhou, Tianwei Shen, Yurong Chen, Long Quan, and Hongen Liao. Learning Two-View Correspondences and Geometry Using Order-Aware Network. In Int. Conf. on Comput. Vis., 2019. 2, 5, 6, 7, 12
|
| 340 |
+
[59] Chen Zhao, Zhiguo Cao, Chi Li, Xin Li, and Jiaqi Yang. NM-Net: Mining Reliable Neighbors for Robust Feature Correspondences. Conf. on Comput. Vis. Pattern Recognit., 2019, 1, 2, 5
|
| 341 |
+
[60] Tinghui Zhou, Matthew Brown, Noah Snavely, and David Lowe. Unsupervised Learning of Depth and Ego-Motion from Video. In Conf. on Comput. Vis. Pattern Recognit., 2017. 1
|
| 342 |
+
[61] Yin Zhou and Oncel Tuzel. VoxelNet: End-to-End Learning for Point Cloud Based 3D Object Detection. In Conf. on Comput. Vis. Pattern Recognit., 2018. 2
|
acneattentivecontextnormalizationforrobustpermutationequivariantlearning/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d835c4771e97759d79c92080af4a0a4c5efb944574e2c9fc939f9e9513f9dae3
size 435126
acneattentivecontextnormalizationforrobustpermutationequivariantlearning/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ad4a398d766bcb31a11f7b832eb06d07517d25ddb8f797c5adbd859e0d887cbb
size 462067
actbertlearninggloballocalvideotextrepresentations/54957bfe-d7fd-4f02-b231-53cc8e938b38_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2c509473b989e12210695480226052433df867d2044d96d0da3c9511521be722
size 72959
actbertlearninggloballocalvideotextrepresentations/54957bfe-d7fd-4f02-b231-53cc8e938b38_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b86df699f4ece7b749ffac4f3446e428ebbc786acc3f4ef53dce8bb12cd207c
size 90502
actbertlearninggloballocalvideotextrepresentations/54957bfe-d7fd-4f02-b231-53cc8e938b38_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5816976afeda790a3061bc7e67aacf79a6d7889f93b21591427e83c7333e7948
size 924479
actbertlearninggloballocalvideotextrepresentations/full.md
ADDED
@@ -0,0 +1,255 @@
# ActBERT: Learning Global-Local Video-Text Representations

Linchao Zhu$^{1,2}$ and Yi Yang$^{2*}$
$^{1}$Baidu Research  $^{2}$ReLER, University of Technology Sydney
{linchao.zhu, yi.yang}@uts.edu.au

# Abstract

In this paper, we introduce ActBERT for self-supervised learning of joint video-text representations from unlabeled data. First, we leverage global action information to catalyze mutual interactions between linguistic texts and local regional objects. It uncovers global and local visual clues from paired video sequences and text descriptions for detailed visual and text relation modeling. Second, we introduce a TaNgled Transformer block (TNT) to encode three sources of information, i.e., global actions, local regional objects, and linguistic descriptions. Global-local correspondences are discovered via judicious clues extraction from contextual information. It enforces the joint video-text representation to be aware of fine-grained objects as well as global human intention. We validate the generalization capability of ActBERT on downstream video-and-language tasks, i.e., text-video clip retrieval, video captioning, video question answering, action segmentation, and action step localization. ActBERT significantly outperforms the state-of-the-art, demonstrating its superiority in video-text representation learning.

# 1. Introduction

While supervised learning has been successful in a variety of computer vision tasks [17, 9, 38, 29], self-supervised representation learning from unlabeled data has attracted increasing attention in recent years [4, 27]. In self-supervised learning, a model is first pre-trained on a large amount of unlabeled data with a surrogate loss. The fine-tuning process further helps the pre-trained model to be specialized in downstream tasks. Recently, there has been rapid progress in self-supervised representation learning for texts [7, 45], where the Bidirectional Encoder Representations from Transformers (BERT) model [7] generalizes remarkably to many natural language tasks, e.g., question answering [2].

Motivated by BERT's success in self-supervised training, we aim to learn an analogous model for video and text joint modeling. We exploit video-text relations based on narrated instructional videos, where the aligned texts are detected by off-the-shelf automatic speech recognition (ASR) models. These instructional videos serve as natural sources for video-text relationship studies. First, they are vastly available and freely accessible on YouTube and other platforms [26, 33]. Second, the visual frames are aligned with the instructional narrations. The text narrations not only explicitly cover the objects in the scene but also identify the salient action in the video clip.

To generalize BERT to video-and-language tasks, Sun et al. [33] extended the BERT model by learning from quantized video frame features. The original BERT takes discrete elements as inputs and predicts the corresponding tokens as the output. In contrast, visual features are real-valued distributed representations, which cannot be directly categorized into discrete labels for "visual token" prediction. Sun et al. [33] therefore discretized visual features into visual words via clustering, so that the visual tokens can be directly passed to the original BERT model. However, detailed local information, e.g., interacting objects and human actions, may be lost during clustering, which prevents the model from uncovering fine-grained relations between video and text. In this paper, we propose ActBERT to learn a joint video-text representation that uncovers global and local visual clues from paired video sequences and text descriptions. Both the global and the local visual signals interact with the semantic stream mutually. ActBERT leverages profound contextual information and exploits fine-grained relations for video-text joint modeling.

First, ActBERT incorporates global actions, local regional objects, and text descriptions in a joint framework. Actions, e.g., "cut", "rotate", "slice", are essential to various video-related downstream tasks. The recognition of human actions demonstrates a model's capacity for motion understanding and complex human-intention reasoning, so it can be beneficial to explicitly model human actions during pre-training. Long-term action sequences furthermore offer temporal dependencies about an instructional task. Though action clues are important, they are largely ignored in previous self-supervised video-text training [33, 26], where actions are treated identically to objects. To model human actions, we first extract verbs from the text descriptions and construct an action classification dataset from the original dataset. Then, a 3D convolutional network is trained to predict the action labels, and the features from the optimized network are used as the action embedding. In this way, clip-level actions are represented, and the corresponding action label is inserted. Besides global action information, we incorporate local regional information to provide fine-grained visual cues [21, 34, 32, 19, 5]. Object regions provide detailed visual clues about the whole scene, including the regional object features and object positions, and the language model can benefit from the regional information for better language-and-visual alignment.

Second, we introduce a TaNgled Transformer block (TNT) to encode features from three sources, i.e., global actions, local regional objects, and linguistic tokens. Previous studies [21, 34] consider two modalities when designing new transformer layers, i.e., fine-grained object information from the image and natural language. Lu et al. [21] introduced a co-attentional transformer layer, where the key-value pairs from one modality are passed to the other modality's attention block to act as the new key-value pairs. In our scenario, however, there are three sources of inputs. Two of them, i.e., the local regional features and the linguistic texts, offer detailed descriptions of the event occurring in the clip, while the global action feature provides the human intention over time as well as a straightforward clue for contextual inference. We design a new tangled transformer block for cross-modality feature learning from these three sources. To enhance the interactions between the two visual cues and the linguistic features, we use a separate transformer block [40] to encode each modality. The mutual cross-modal communication is then enhanced with two additional multi-head attention blocks, with the action feature catalyzing the mutual interactions: guided by the action features, we inject visual information into the linguistic transformer and incorporate linguistic information into the visual transformers. The tangled transformer dynamically selects judicious cues from its context to facilitate the target prediction.

Furthermore, we design four surrogate tasks to train ActBERT, i.e., masked language modeling with global and local visual cues, masked action classification, masked object classification, and cross-modal matching. The pre-trained ActBERT is transferred to five video-related downstream tasks, i.e., video captioning, action segmentation, text-video clip retrieval, action step localization, and video question answering. We quantitatively show that ActBERT achieves state-of-the-art performance with a clear margin.

# 2. Related Work

Video and language. Many existing video-and-language tasks evaluate a model's capacity for joint video-text representation learning, e.g., video question answering [36, 10, 18, 54], video captioning [46, 52], text-video retrieval [47, 41, 25], and video grounding [50]. In video and language modeling, it can be difficult to learn relations between ordered video frames and their corresponding descriptions, where temporal information and the spatio-temporal interactions between multiple objects need to be incorporated. The dominant approach for multi-modal modeling is to leverage Recurrent Neural Networks (RNNs) and their variants, e.g., Long Short-Term Memory (LSTM) and Gated Recurrent Units (GRUs), to model sequence relations, e.g., [28, 53]. Zhou et al. [52] leveraged masked transformers in both the encoder and the decoder for dense video captioning. Most of these works are conducted on well-annotated datasets whose descriptions are manually generated, requiring considerable human effort; other works learn video representations from limited annotated data [55]. Video data is a natural source for learning cross-modal representations: the text descriptions can be automatically generated by off-the-shelf automatic speech recognition (ASR) models, which is more scalable and better suits deployment in real-world applications. In this paper, we focus on learning a joint video-text representation in a self-supervised way.

Cross-modal pre-training. In the past year, many works have extended BERT to model cross-modal data [21, 32, 34, 5, 19, 33]. The recent BERT model for video-text modeling [33] introduces visual words for video frame encoding, where local regional information is largely ignored. The synchronized video-audio signal is also a good test-bed for cross-modal representation learning [3, 15]; however, these works leverage low-level audio signals and only consider the synchronization nature of video data. In this work, we focus on video-text joint representation learning. Our ActBERT leverages multi-source information and achieves remarkable performance on many downstream video-text tasks.

Instructional videos. Learning from instructional videos is challenging due to the data complexity across various tasks [6, 1, 51, 26]. These videos are collected from many domains, e.g., cooking, sports, and gardening. Many works also regard the transcriptions generated from instructional videos as a source of supervision [1, 51, 26]. In contrast, we employ ActBERT to explicitly model human actions and local regions in a unified framework. We improve upon [26] with more specific relation modeling between videos and their descriptions, and quantitatively demonstrate that ActBERT is more suitable for unsupervised video-text modeling.

# 3. Model Architecture

# 3.1. Preliminary

We first review the original BERT model [7]. BERT [7] pre-trains a language model on large corpora in an unsupervised way. The pre-trained model is found to be flexible and beneficial to a variety of downstream tasks, e.g., question answering [2].

In BERT [7], the input entities are processed by a multi-layer bidirectional transformer [40]. The embeddings of each input are processed with stacked self-attention layers to aggregate contextual features, using adaptively generated attention weights, so that the output features contain contextual information about the original input sequence. Since the features generated by self-attention do not depend on the order of the input sequence, the output representation is permutation-invariant: it is not affected when the input sequence is shuffled. A position embedding is therefore commonly applied to each input entity to incorporate sequential order clues.

In the original BERT, Devlin et al. introduced two tasks for pre-training. In masked language modeling (MLM), a portion of the input words is randomly masked out, and the masked-out words are replaced by a special token "[MASK]". The task is to predict the masked words from the contextual contents, i.e., the unmasked elements, which provide relevant cues for the prediction of each masked word.
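
For intuition, here is a minimal sketch of this masking scheme (the token list and helper name are illustrative assumptions; the 15% mask rate is the one used later in Section 4.1):

```python
import random

MASK_PROB = 0.15  # fraction of input elements masked during pre-training

def mask_tokens(tokens):
    """Randomly replace tokens with "[MASK]" and record the targets the
    model must recover; unmasked positions receive no prediction loss."""
    corrupted, targets = [], []
    for tok in tokens:
        if random.random() < MASK_PROB:
            corrupted.append("[MASK]")
            targets.append(tok)   # the model must predict this token
        else:
            corrupted.append(tok)
            targets.append(None)  # no loss on unmasked positions
    return corrupted, targets

corrupted, targets = mask_tokens(["cut", "the", "onion", "into", "slices"])
```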

The other task, Next Sentence Prediction (NSP), models order information between two sentences. Two sentences are sampled from a document, and NSP aims to identify whether the second sentence directly follows the first. The two sentences are concatenated via the token "[SEP]", so that the model is aware that the inputs are separate sentences. The prediction is made upon the output features of the first token "[CLS]". This is a binary classification problem, and a simple sigmoid classifier is used: a prediction of "1" indicates that the sentences are consecutive, i.e., the second sentence comes right after the first.

# 3.2. ActBERT

# 3.2.1 Input Embeddings

There are four types of input elements in ActBERT: actions, image regions, linguistic descriptions, and special tokens. The special tokens are used to distinguish the different inputs.

Each input sequence starts with a special token "[CLS]" and ends with another token "[SEP]". We put the linguistic descriptions after "[CLS]", followed by the action inputs and then the local regional features. We denote the action features as $a_1, \ldots, a_L$, the frame region features as $r_1, \ldots, r_M$, and the sequential text descriptions as $w_1, \ldots, w_N$. The whole sequence is denoted as $\{[\mathrm{CLS}], w_1, \ldots, w_N, [\mathrm{SEP}], a_1, \ldots, a_L, [\mathrm{SEP}], r_1, \ldots, r_M, [\mathrm{SEP}]\}$. "[SEP]" is also inserted between different sentences, as well as between regions from different clips, which helps the model identify clip boundaries. For each input step, the final embedding consists of four parts: the position embedding, the segment embedding, the token embedding, and the visual feature embedding. We add a few new tokens to distinguish action features from regional object features, and the visual embedding is introduced to carry visual and action information. These embeddings are summed to form the final input feature of ActBERT, as sketched below. We explain them in detail as follows.
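
As a concrete illustration, a minimal sketch of the sequence construction and per-step embedding summation might look as follows (the helper names are hypothetical, not the authors' code):

```python
def build_sequence(words, actions, regions):
    """Assemble [CLS] w1..wN [SEP] a1..aL [SEP] r1..rM [SEP]."""
    return (["[CLS]"] + words + ["[SEP]"]
            + actions + ["[SEP]"]
            + regions + ["[SEP]"])

def input_feature(pos_emb, seg_emb, tok_emb, vis_emb):
    # The four per-step embeddings are element-wise summed.
    return pos_emb + seg_emb + tok_emb + vis_emb

# Action and region steps carry the "[ACT]" / "[REGION]" token embeddings:
print(build_sequence(["cut", "the", "onion"], ["[ACT]"], ["[REGION]"] * 2))
```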

Position embedding. Following [7], we incorporate a learnable position embedding for every input in the sequence. Since self-attention does not consider order information, position encoding offers a flexible way to embed a sequence when the sequence order matters. The actions in different clips receive different position embeddings, as the video clips are ordered. Regions extracted from the same frame share the same position embedding; to distinguish them, we add a spatial position embedding for different spatial positions, described under "Visual (action) embedding" below.

Segment embedding. We consider multiple video clips for long-term video context modeling. Each video clip, or video segment, has a corresponding segment embedding. All elements of the same video clip, i.e., action inputs, regional object inputs, and linguistic descriptions, share the same segment embedding.

Token embedding. Each word is embedded with WordPiece embeddings [42] over a 30,000-word vocabulary. In addition to the special tokens mentioned above ("[CLS]", "[MASK]", "[SEP]"), we introduce "[ACT]" and "[REGION]" to represent the action features and the region features extracted from video frames, respectively. Note that all action inputs have the identical token embedding, which reveals the modality of the inputs.

Visual (action) embedding. We now explain the visual (action) embedding in detail, starting with the procedure to obtain the action embedding. For each video clip, we extract the verbs from its corresponding descriptions; for simplicity, we remove clips that do not contain any verbs. We then build a vocabulary from all the extracted verbs, so that each video clip carries one or multiple category labels, and train a 3D convolutional neural network on this constructed dataset. The input to the 3D network is a tensor with an additional temporal dimension, and a softmax classifier is placed on top of the network. For clips with multiple labels, we normalize the one-hot label with the $\ell_1$-norm, so that the scores over all labels sum to 1. After the model is trained, we extract the features after global average pooling as the action features, which represent the actions occurring in the video clip well.
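
The $\ell_1$-normalized multi-label target amounts to a few lines; the verb ids below are invented for illustration:

```python
import numpy as np

def soft_action_target(label_ids, num_classes):
    """l1-normalized multi-hot target: every verb present in the clip's
    narration receives equal mass, and the scores sum to 1."""
    target = np.zeros(num_classes, dtype=np.float32)
    target[label_ids] = 1.0
    return target / target.sum()

# A clip whose narration contains the verbs with ids 3 and 7:
print(soft_action_target([3, 7], num_classes=10))  # 0.5 at indices 3 and 7
```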

To obtain regional object features, we extract bounding boxes and the corresponding visual features from a pre-trained object detection network. Similar to Lu et al. [21], we utilize a pre-trained Faster R-CNN network [29] to extract the categorical distribution under the COCO vocabulary [20]. The image region features offer detailed visual information for visual-text relation modeling. For each region, the visual feature embedding is the feature vector before the output layer of the pre-trained network. Following [21], we incorporate spatial position embeddings that represent region locations with a 5-D vector consisting of the four box coordinates and the fraction of the region area, i.e., $\left(\frac{x_1}{W},\frac{y_1}{H},\frac{x_2}{W},\frac{y_2}{H},\frac{(x_2 - x_1)(y_2 - y_1)}{WH}\right)$, where $W$ is the frame width, $H$ is the frame height, and $(x_{1},y_{1})$ and $(x_{2},y_{2})$ are the top-left and bottom-right coordinates, respectively.

This vector is then embedded to match the dimension of the visual feature. The final regional object feature is the summation of the spatial position embedding and the object detection feature.
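
Computing the 5-D spatial vector is straightforward; a small sketch, with an invented example box, is:

```python
def region_position_vector(x1, y1, x2, y2, W, H):
    """5-D spatial encoding of a detected box: normalized corner
    coordinates plus the fraction of the frame area the region covers."""
    return (x1 / W, y1 / H, x2 / W, y2 / H,
            (x2 - x1) * (y2 - y1) / (W * H))

# A 200x100 box with its top-left corner at (40, 60) in a 640x360 frame:
print(region_position_vector(40, 60, 240, 160, 640, 360))
```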

# 3.2.2 Tangled Transformer

We design a TaNgled Transformer (TNT) to better encode three sources of information, i.e., action features, regional object features, and linguistic features.

Instead of using a single transformer that treats the visual and text features identically, our tangled transformer consists of three transformers, one for each source of features. To enhance the interactions between the visual and linguistic features, we propose to inject visual information into the linguistic transformer and to incorporate linguistic information into the visual transformers. With these cross-modal interactions, the tangled transformer can dynamically select judicious cues for target prediction.

We denote the intermediate representations at transformer block $l$ as $h^l = \{(h_{w_0}^l, \dots, h_{w_N}^l), (h_{a_0}^l, \dots, h_{a_L}^l), (h_{r_0}^l, \dots, h_{r_M}^l)\}$. For simplicity, we write $h_w^l = \{h_{w_0}^l, \dots, h_{w_N}^l\}$, $h_a^l = \{h_{a_0}^l, \dots, h_{a_L}^l\}$, and $h_r^l = \{h_{r_0}^l, \dots, h_{r_M}^l\}$, which are processed by the $w$-transformer, the $a$-transformer, and the $r$-transformer, respectively (Figure 1). Besides the standard multi-head attention that encodes features from the same modality, we leverage two additional multi-head attention blocks to enhance the mutual interactions between the transformer blocks. Specifically, we utilize $h_a^l$ to catalyze the mutual interactions. We denote multi-head attention as output $= \text{Multihead}(Q, K, V)$, where $Q$ is the query, $K$ is the key, and $V$ is the value; its details can be found in [40].

Figure 1: Our tangled transformer takes three sources of information as inputs, which enhances the interactions between linguistic features and visual features.

We use $h_a^l$ as a query to attend to judicious cues from $h_w^l$ and $h_r^l$:

$$
c_w = \text{Multihead}\left(W_q^1 h_a^l,\; W_k^w h_w^l,\; W_v^w h_w^l\right), \tag{1}
$$

$$
c_r = \text{Multihead}\left(W_q^2 h_a^l,\; W_k^r h_r^l,\; W_v^r h_r^l\right), \tag{2}
$$

where the $W_{*}^{*}$ are learnable weights, $c_{w}$ is the blended feature from the linguistic representations, and $c_{r}$ is the guided feature from the regional object representations. We then generate a new key-value pair from $c_{w}$ using a linear layer; this pair is stacked with the key-value pairs of the original $a$-transformer and $r$-transformer. Similarly, we generate a new key-value pair from $c_{r}$, which is stacked with the key-value pairs of the $w$-transformer. With this tangled transformer, visual and linguistic features are further associated, as the sketch below illustrates.
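
A minimal PyTorch sketch of the guidance in Eqs. (1)-(2) and of the key-value stacking, under an assumed feature dimension (this illustrates the mechanism, not the released implementation):

```python
import torch
import torch.nn as nn

d = 256  # assumed feature dimension
attn_w = nn.MultiheadAttention(d, num_heads=8, batch_first=True)  # Eq. (1)
attn_r = nn.MultiheadAttention(d, num_heads=8, batch_first=True)  # Eq. (2)
to_kv = nn.Linear(d, 2 * d)  # generates a new key-value pair from c_w / c_r

h_w = torch.randn(1, 12, d)  # N linguistic token features
h_a = torch.randn(1, 3, d)   # L action features (the global guide)
h_r = torch.randn(1, 5, d)   # M regional object features

c_w, _ = attn_w(h_a, h_w, h_w)  # action queries attend over linguistic cues
c_r, _ = attn_r(h_a, h_r, h_r)  # action queries attend over regional cues

# The pair generated from c_r is stacked with (not substituted for) the
# w-transformer's own keys and values; c_w is handled symmetrically.
k_new, v_new = to_kv(c_r).chunk(2, dim=-1)
keys_w = torch.cat([h_w, k_new], dim=1)
values_w = torch.cat([h_w, v_new], dim=1)
```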

Note that our tangled transformer differs from the co-attentional transformer block in [21] in several ways. First, the co-attentional transformer block simply passes the keys and values from one modality to the other modality's attention block, without further pre-processing. Second, [21] treats the two modalities equally, while our tangled block utilizes a global cue to guide the selection of local hints from the linguistic and visual features. Third, in [21] the keys and values from different modalities replace the original key-values, while our tangled transformer stacks them with the original ones. In this way, both the linguistic and visual features are incorporated during transformer encoding.

# 3.2.3 ActBERT Training

We introduce four tasks for ActBERT pre-training; our framework is presented in Figure 2. We naturally extend Masked Language Modeling to our cross-modal setting. There are existing extensions for image-and-language pre-training [21, 33] and video-and-language pre-training [33]; compared to [33], we explicitly model actions and regional information in a unified framework.

Figure 2: Our ActBERT framework. We incorporate three sources of information during pre-training, i.e., global actions, local regional objects, and text descriptions. The yellow grid indicates that the action or the regional object is masked out.

Masked Language Modeling with Global and Local Visual Cues. We extend the Masked Language Modeling (MLM) task in BERT to our setting. We leverage visual cues from local regional objects and global actions to uncover the relationships between visual and linguistic entities. As described in Section 3.1, each word in the input sentence is randomly masked with a fixed probability. The task forces the model to learn from contextual descriptions and, at the same time, extract relevant visual features to facilitate prediction. When a verb is masked out, the model should exploit the action features for a more accurate prediction; when the description of an object is masked out, the local regional features can provide more contextual information. A strong model thus needs to align visual and linguistic inputs both locally and globally. The output feature is fed to a softmax classifier over the whole linguistic vocabulary.

Masked Action Classification. Similarly, in Masked Action Classification, the action features are masked out, and the task is to predict the masked action label based on the linguistic features and object features. Explicit action prediction is beneficial from two perspectives. First, long-term action sequential cues can be exploited; for example, for a video with the action sequence "get into", "rotate", "add", this task can better exploit the temporal order information of the instructional assignment. Second, the regional objects and linguistic texts are leveraged for better cross-modality modeling. Note that in Masked Action Classification, the goal is to predict the categorical label of the masked-out action feature. This task enhances the action recognition capability of the pre-trained model, which generalizes to many downstream tasks, e.g., video question answering.

Masked Object Classification. In Masked Object Classification, the regional object features are randomly masked out. We follow [21] and predict a distribution over a fixed vocabulary for the masked-out image region. The target distribution of the masked-out region is the softmax activation obtained by forwarding the region through the same pre-trained detection model used in the feature extraction stage. The KL divergence between the two distributions is minimized.
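
A sketch of this objective, with an assumed detector vocabulary size:

```python
import torch
import torch.nn.functional as F

num_classes = 1601  # assumed size of the detector's fixed vocabulary

# Teacher: the detection model's softmax for the masked-out region.
teacher = torch.softmax(torch.randn(1, num_classes), dim=-1)
# Student: the model's predicted distribution for the same masked region.
student_logits = torch.randn(1, num_classes)

# KL(teacher || student); F.kl_div expects log-probabilities first.
loss = F.kl_div(F.log_softmax(student_logits, dim=-1), teacher,
                reduction="batchmean")
```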

Cross-modal matching. Similar to the Next Sentence Prediction (NSP) task, we apply a linear layer on top of the output of the first token "[CLS]", followed by a sigmoid classifier that produces the relevance score between the linguistic sentences and the visual features; a high score indicates that the text describes the video clips well. The model is optimized via a binary cross-entropy loss. To train this cross-modal matching task, we sample negative video-text pairs from the unlabeled dataset, following [26] for sampling positive and negative pairs.
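
A compact sketch of such a matching head (the hidden size of 768 matches the transformer blocks of Section 4.1; the batch contents are invented):

```python
import torch
import torch.nn as nn

match_head = nn.Linear(768, 1)  # linear layer on the "[CLS]" output feature

h_cls = torch.randn(4, 768)               # "[CLS]" features for 4 pairs
score = torch.sigmoid(match_head(h_cls))  # relevance of text to video

labels = torch.tensor([[1.], [0.], [1.], [0.]])  # positive / negative pairs
loss = nn.functional.binary_cross_entropy(score, labels)
```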

# 4. Experiments

In this section, we evaluate ActBERT in multiple downstream video-and-language tasks. We quantitatively evaluate the generalization capability of ActBERT on five challenging tasks, i.e., text-video clip retrieval, video captioning, video question answering, action segmentation, and action step localization.

# 4.1. ActBERT implementation details

HowTo100M. We pre-train ActBERT on the HowTo100M dataset [26]. The HowTo100M dataset is constructed by querying the YouTube API, keeping the top 200 search results per query. It covers a total of 23,611 tasks, e.g., maintenance and repair, animal rescue, and food preparation. The dataset is biased towards actions, with verbs like "go", "make", and "come" being the most frequent. The nouns also follow a long-tailed distribution, with objects like "water" and "cup" ranked at the top. Each video has a corresponding narration extracted from the video subtitles. As the association between video clips and texts is not manually annotated, the video-text connection can sometimes be weak, with noisy correspondences where the actors talk about unrelated things. Though noisy, we found that pre-training on HowTo100M can still significantly improve the performance of downstream tasks.

Pre-training details. To construct video-text inputs for ActBERT pre-training, we sample video clips from the HowTo100M dataset. Instead of using only one clip for video-text joint training, we leverage multiple adjacent clips to cover a longer context, which enables ActBERT to model relations across different segments. We sample 10 adjacent video clips and extract the temporally-aligned linguistic tokens to form a video-text pair.

To obtain the local regional features, we use Faster R-CNN pre-trained on the Visual Genome [16] dataset, following [21]. The backbone is ResNet-101 [9]. We extract the regional features at a frame rate of 1 FPS. Each region feature is RoI-pooled from the convolutional feature map of that region. We set the detection confidence threshold to 0.4, and each frame contains at most five boxes. Transformer and co-attentional transformer blocks in the visual stream have a hidden state size of 1024 and 8 attention heads.

To obtain the action features, we first construct an action classification dataset. We sample frames at 8 FPS. For each clip, we extract the verbs from its text descriptions. Then, we train a ResNet-3D [39] network with a softmax classification loss, initializing its weights from a model pre-trained on Kinetics [12]. The Kinetics dataset covers 400 actions from YouTube videos, and the 3D convolutional network converges faster when pre-trained on it. The input clip length to ResNet-3D is 32 frames, covering a 4-second video duration, and the spatial shape of each input frame is $224 \times 224$. The initial learning rate is set to 0.001 and the batch size to 16. We decay the learning rate by 0.1 at iteration 100,000, and the total number of training iterations is 1,000,000. Other training settings follow [39]. During feature extraction, we sample the central clip, and each frame is center-cropped. We use the feature after global average pooling as the clip representation.

During ActBERT pre-training, $15\%$ of the input features are randomly masked out. ActBERT has 12 layers of transformer blocks, each with a hidden unit size of 768. We initialize the linguistic transformer with the BERT model pre-trained on the BookCorpus [56] and English Wikipedia; the other two transformers are randomly initialized. The network is optimized with the Adam optimizer and a learning rate of $10^{-5}$. We train the model for five epochs due to the large scale of the data, using four NVIDIA Tesla V100 GPUs.

# 4.2. Results on video-and-text tasks

We evaluate ActBERT on five downstream tasks, i.e., action step localization, action segmentation, text-video clip retrieval, video captioning, and video question answering, using CrossTask [57], COIN [35], YouCook2 [51], and MSR-VTT [44]. Videos from the test sets of these datasets are removed during pre-training on HowTo100M.

# 4.2.1 Datasets

CrossTask: We evaluate action step localization on the CrossTask [57] dataset. CrossTask contains 83 tasks and 4.7k videos related to cooking, car maintenance, crafting, etc. We use the recall metric described in [57], defined as the number of step assignments that fall into the ground-truth interval, divided by the total number of steps in the video (a small sketch of this metric follows the dataset descriptions below).

COIN: We evaluate the action segmentation task on the recent COIN [35] dataset. COIN contains 180 tasks, 11,827 videos, and 46,354 annotated segments. The videos are collected from YouTube.

YouCook2: We evaluate text-video clip retrieval and video captioning on YouCook2, a cooking video dataset collected from YouTube, covering a large variety of cooking styles, methods, ingredients and cookwares [51]. In YouCook2, there are 89 types of recipes and in total 14k clips described with linguistic texts. Following [26], we evaluate the text-video clip retrieval task on the validation clips of YouCook2.

MSR-VTT: We evaluate text-video clip retrieval and video question answering on MSR-VTT. The MSR-VTT dataset [44] is a general video dataset collected from YouTube with text descriptions. For the video question answering task, we evaluate multiple-choice VideoQA following [47]. There are 2,990 questions in total for testing. Each test video is associated with a ground-truth caption, a correct answer, and four mismatched descriptions. For text-video clip retrieval, following [47], we use 1,000 text-video pairs for evaluation.
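
As referenced above, a small sketch of the CrossTask recall metric, under assumed inputs (`preds` maps each step to its predicted time, `gt` to its ground-truth interval):

```python
def recall(preds, gt):
    """Fraction of step assignments falling inside their ground-truth interval."""
    hits = sum(1 for step, t in preds.items()
               if gt[step][0] <= t <= gt[step][1])
    return hits / len(gt)

# Two of the three predicted steps land inside their intervals:
print(recall({"pour": 4.0, "stir": 9.5, "serve": 30.0},
             {"pour": (3, 6), "stir": (8, 12), "serve": (20, 25)}))
```
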
# 4.2.2 Video captioning

We compare ActBERT to VideoBERT [33] on the video captioning task, taking the pre-trained action transformer as the video encoder. We follow the setup of [52], which takes video clips from YouCook2 [51] as input and uses a transformer decoder to decode videos into captions. We do not use the regional object transformer, for a fair comparison to [33]. Similar to [33], we cross-validate the hyperparameters on the training set, and report the standard captioning metrics, i.e., BLEU, METEOR, ROUGE-L, and CIDEr, on the validation set. The model is optimized with the Adam optimizer for 40k iterations, with an initial learning rate of $1.0 \times 10^{-3}$ and a batch size of 128. The results are shown in Table 1. We outperform VideoBERT [33] across all metrics, achieving a 1.36 improvement on METEOR. This demonstrates that our pre-trained transformer learns a better video representation and indicates the effectiveness of ActBERT in modeling video sequences by considering both global and local video cues. Our transformer generalizes better in video captioning.

<table><tr><td>Method</td><td>BLEU-3</td><td>BLEU-4</td><td>METEOR</td><td>ROUGE-L</td><td>CIDEr</td></tr><tr><td>Zhou et al. [52]</td><td>7.53</td><td>3.84</td><td>11.55</td><td>27.44</td><td>0.38</td></tr><tr><td>S3D [43]</td><td>6.12</td><td>3.24</td><td>9.52</td><td>26.09</td><td>0.31</td></tr><tr><td>VideoBert [33]</td><td>6.80</td><td>4.04</td><td>11.01</td><td>27.50</td><td>0.49</td></tr><tr><td>VideoBert + S3D [33]</td><td>7.59</td><td>4.33</td><td>11.94</td><td>28.80</td><td>0.55</td></tr><tr><td>ActBERT</td><td>8.66</td><td>5.41</td><td>13.30</td><td>30.56</td><td>0.65</td></tr></table>

Table 1: Video captioning results on YouCook2. We outperform VideoBERT [33] across all the metrics.

# 4.2.3 Action segmentation

The action segmentation task in COIN is to assign an action label to a video at the frame level. To apply ActBERT to action segmentation, we fine-tune it by adding a linear classifier upon the output features for dense frame labeling; we do not feed the text descriptions during the fine-tuning process. The results are shown in Table 2, where the baseline experiments were conducted by [35]. Notably, ActBERT significantly outperforms the baselines, with more than $20\%$ improvement. This shows that the pre-trained ActBERT can handle visual-only inputs when linguistic descriptions are absent. When we remove the regional information, we observe a performance drop compared to our full model, which shows that detailed local cues are important for the dense frame labeling task.

<table><tr><td>Method</td><td>Frame Accuracy (%)</td></tr><tr><td>NN-Viterbi [30]</td><td>21.17</td></tr><tr><td>VGG [31]</td><td>25.79</td></tr><tr><td>TCFPN-ISBA [8]</td><td>34.30</td></tr><tr><td>ActBERT w/o region cues</td><td>52.10</td></tr><tr><td>ActBERT</td><td>56.95</td></tr></table>

Table 2: Action segmentation results on COIN.

# 4.2.4 Action step localization

We evaluate action step localization on CrossTask. For a fair comparison to [26], we do not fine-tune on the target dataset. We regard the step action label as the text description and directly feed the text-video pair to ActBERT, taking the prediction on the first token "[CLS]" as the relevance score of the clip belonging to that label, and choosing the action with the maximum relevance score as the final prediction. The results are shown in Table 3. ActBERT significantly outperforms TVJE [26] with a large margin, i.e., an average improvement of $7\%$, and even surpasses the supervised baseline. We also remove the region cues for a fair comparison to [26], as [26] does not use object detection features for video-text matching; the results of "ActBERT w/o region cues" still substantially outperform [26], demonstrating the effectiveness of ActBERT pre-training. Our full ActBERT model further improves performance by $4\%$, which validates that regional information is an important source of detailed local object features for text-video matching.

<table><tr><td></td><td>Make Kimchi Rice</td><td>Pickle Cucumber</td><td>Make Banana Ice Cream</td><td>Grill Steak</td><td>Jack Up Car</td><td>Make Jello Shots</td><td>Change Tire</td><td>Make Lemonade</td><td>Add Oil to Car</td><td>Make Latte</td><td>Build Shelves</td><td>Make Taco Salad</td><td>Make French Toast</td><td>Make Irish Coffee</td><td>Make Strawberry Cake</td><td>Make Pancakes</td><td>Make Meringue</td><td>Make Fish Curry</td><td>Average</td></tr><tr><td>Alayrac et al. [1]</td><td>15.6</td><td>10.6</td><td>7.5</td><td>14.2</td><td>9.3</td><td>11.8</td><td>17.3</td><td>13.1</td><td>6.4</td><td>12.9</td><td>27.2</td><td>9.2</td><td>15.7</td><td>8.6</td><td>16.3</td><td>13.0</td><td>23.2</td><td>7.4</td><td>13.3</td></tr><tr><td>Zhukov et al. [57]</td><td>13.3</td><td>18.0</td><td>23.4</td><td>23.1</td><td>16.9</td><td>16.5</td><td>30.7</td><td>21.6</td><td>4.6</td><td>19.5</td><td>35.3</td><td>10.0</td><td>32.3</td><td>13.8</td><td>29.5</td><td>37.6</td><td>43.0</td><td>13.3</td><td>22.4</td></tr><tr><td>Supervised [57]</td><td>19.1</td><td>25.3</td><td>38.0</td><td>37.5</td><td>25.7</td><td>28.2</td><td>54.3</td><td>25.8</td><td>18.3</td><td>31.2</td><td>47.7</td><td>12.0</td><td>39.5</td><td>23.4</td><td>30.9</td><td>41.1</td><td>53.4</td><td>17.3</td><td>31.6</td></tr><tr><td>TVJE [26]</td><td>33.5</td><td>27.1</td><td>36.6</td><td>37.9</td><td>24.1</td><td>35.6</td><td>32.7</td><td>35.1</td><td>30.7</td><td>28.5</td><td>43.2</td><td>19.8</td><td>34.7</td><td>33.6</td><td>40.4</td><td>41.6</td><td>41.9</td><td>27.4</td><td>33.6</td></tr><tr><td>ActBERT w/o region cues</td><td>37.4</td><td>29.5</td><td>39.0</td><td>42.2</td><td>29.8</td><td>37.5</td><td>35.5</td><td>37.8</td><td>33.2</td><td>32.8</td><td>48.4</td><td>25.2</td><td>37.4</td><td>35.6</td><td>42.4</td><td>47.0</td><td>46.1</td><td>30.4</td><td>37.1</td></tr><tr><td>ActBERT</td><td>41.8</td><td>33.6</td><td>42.7</td><td>46.8</td><td>33.4</td><td>43.0</td><td>40.8</td><td>41.8</td><td>38.3</td><td>37.4</td><td>52.5</td><td>30.1</td><td>41.2</td><td>40.4</td><td>46.1</td><td>51.0</td><td>49.7</td><td>35.1</td><td>41.4</td></tr></table>

Table 3: Action step localization results on CrossTask [57].

# 4.2.5 Text-video clip retrieval

We evaluate ActBERT on the task of video clip retrieval with natural language queries: given a linguistic query, the goal is to rank the video clips from a gallery video set. Following [26], we use Recall@1 (R@1), Recall@5 (R@5), Recall@10 (R@10) and the median rank (Median R) as evaluation metrics. We evaluate ActBERT on YouCook2 and MSR-VTT, and follow [26] for the YouCook2 evaluation. The results are shown in Table 4. ActBERT significantly outperforms TVJE [26] and the other baselines. TVJE trains a ranking loss on the HowTo100M dataset; the results show that ActBERT is a better pre-training framework for video-text joint representation learning. Notably, our pre-trained model achieves better retrieval performance than the fine-tuned TVJE model ("TVJE +FT") on YouCook2, which shows the superiority of ActBERT in self-supervised video-text representation learning. On MSR-VTT, ActBERT outperforms TVJE by $1.1\%$ on R@1 when no labeled data is accessed. Note that JSFusion [47] is a supervised method that leverages labeled video and text pairs for training.

<table><tr><td>Method</td><td>Dataset</td><td>R@1</td><td>R@5</td><td>R@10</td><td>Median R</td></tr><tr><td>HGLMM [14]</td><td>YouCook2</td><td>4.6</td><td>14.3</td><td>21.6</td><td>75</td></tr><tr><td>TVJE [26]</td><td>YouCook2</td><td>4.2</td><td>13.7</td><td>21.5</td><td>65</td></tr><tr><td>TVJE +FT [26]</td><td>YouCook2</td><td>8.2</td><td>24.5</td><td>35.3</td><td>24</td></tr><tr><td>ActBERT</td><td>YouCook2</td><td>9.6</td><td>26.7</td><td>38.0</td><td>19</td></tr><tr><td>C+LSTM+SA [37]</td><td>MSR-VTT</td><td>4.2</td><td>12.9</td><td>19.9</td><td>55</td></tr><tr><td>VSE-LSTM [13]</td><td>MSR-VTT</td><td>3.8</td><td>12.7</td><td>17.1</td><td>66</td></tr><tr><td>SNUVL [48]</td><td>MSR-VTT</td><td>3.5</td><td>15.9</td><td>23.8</td><td>44</td></tr><tr><td>Kaufman et al. [11]</td><td>MSR-VTT</td><td>4.7</td><td>16.6</td><td>24.1</td><td>41</td></tr><tr><td>CT-SAN [49]</td><td>MSR-VTT</td><td>4.4</td><td>16.6</td><td>22.3</td><td>35</td></tr><tr><td>JSFusion [47]</td><td>MSR-VTT</td><td>10.2</td><td>31.2</td><td>43.2</td><td>13</td></tr><tr><td>TVJE [26]</td><td>MSR-VTT</td><td>7.5</td><td>21.2</td><td>29.6</td><td>38</td></tr><tr><td>ActBERT</td><td>MSR-VTT</td><td>8.6</td><td>23.4</td><td>33.1</td><td>36</td></tr></table>

Table 4: Text-video clip retrieval results on YouCook2 and MSR-VTT. "FT" denotes fine-tuning on the training set.

# 4.2.6 Video question answering

We evaluate ActBERT on the multiple-choice VideoQA task. We fine-tune the pre-trained ActBERT on the MSR-VTT training set, feeding the video-text pairs to ActBERT and applying a linear classifier upon the output feature. We use a small learning rate of 0.0001 and the Adam optimizer for training. At inference time, we feed each candidate answer together with the video clip to ActBERT; the final choice is made by selecting the candidate with the maximum matching score. The results are shown in Table 5. We compare to many baselines on this task. Without fancy joint modeling, ActBERT significantly outperforms JSFusion [47] by $3\%$, showing ActBERT's strong generalization from a large-scale dataset.

<table><tr><td>Method</td><td>Accuracy</td></tr><tr><td>Text-only BLSTM [22]</td><td>32.0</td></tr><tr><td>Text-only Human [22]</td><td>30.2</td></tr><tr><td>GoogleNet-2D + C3D [22]</td><td>35.7</td></tr><tr><td>Merging-LSTM [23]</td><td>34.2</td></tr><tr><td>SNUVL [48]</td><td>38.0</td></tr><tr><td>CT-SAN [49]</td><td>41.9</td></tr><tr><td>LR/RL LSTMs [24]</td><td>40.9</td></tr><tr><td>JSFusion [47]</td><td>45.5</td></tr><tr><td>ActBERT</td><td>48.6</td></tr></table>

Table 5: Video question answering (multiple-choice) results on MSR-VTT.

# 5. Conclusion

In this paper, we introduce ActBERT for joint video-text modeling in a self-supervised way. We directly model both global and local visual cues for fine-grained visual-linguistic relation learning. ActBERT takes three sources of information as input, i.e., global actions, local regional objects, and linguistic descriptions, and the novel tangled transformer further enhances the communication between the three sources. Quantitative results on five video-text benchmarks demonstrate the effectiveness of ActBERT. In the future, we will consider evaluating ActBERT on video action recognition and detection, and will improve ActBERT by designing more powerful modules for video and text modeling.

Acknowledgements. This work is supported by ARC DP200100938.

# References

[1] Jean-Baptiste Alayrac, Piotr Bojanowski, Nishant Agrawal, Josef Sivic, Ivan Laptev, and Simon Lacoste-Julien. Unsupervised learning from narrated instruction videos. In CVPR, 2016. 2, 8
[2] Chris Alberti, Kenton Lee, and Michael Collins. A bert baseline for the natural questions. arXiv preprint arXiv:1901.08634, 2019. 1, 3
[3] Relja Arandjelovic and Andrew Zisserman. Objects that sound. In ECCV, 2018. 2
[4] Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze. Deep clustering for unsupervised learning of visual features. In ECCV, 2018. 1
[5] Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. Uniter: Learning universal image-text representations. arXiv preprint arXiv:1909.11740, 2019. 2
[6] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, et al. Scaling egocentric vision: The epic-kitchens dataset. In ECCV, 2018. 2
[7] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 1, 3
[8] Li Ding and Chenliang Xu. Weakly-supervised action segmentation with iterative soft boundary assignment. In CVPR, pages 6508-6516, 2018. 7
[9] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 1, 6
[10] Yunseok Jang, Yale Song, Youngjae Yu, Youngjin Kim, and Gunhee Kim. Tgif-qa: Toward spatio-temporal reasoning in visual question answering. In CVPR, 2017. 2
[11] Dotan Kaufman, Gil Levi, Tal Hassner, and Lior Wolf. Temporal tessellation: A unified approach for video analysis. In ICCV, 2017. 8
[12] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017. 6
[13] Ryan Kiros, Ruslan Salakhutdinov, and Richard S Zemel. Unifying visual-semantic embeddings with multimodal neural language models. arXiv preprint arXiv:1411.2539, 2014. 8
[14] Benjamin Klein, Guy Lev, Gil Sadeh, and Lior Wolf. Associating neural word embeddings with deep image representations using fisher vectors. In CVPR, 2015. 8
[15] Bruno Korbar, Du Tran, and Lorenzo Torresani. Cooperative learning of audio and video models from self-supervised synchronization. In NeurIPS, 2018. 2
[16] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. IJCV, 123(1):32-73, 2017. 6
[17] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In NeurIPS, 2012. 1
[18] Jie Lei, Licheng Yu, Mohit Bansal, and Tamara L Berg. Tvqa: Localized, compositional video question answering. arXiv preprint arXiv:1809.01696, 2018. 2
[19] Gen Li, Nan Duan, Yuejian Fang, Daxin Jiang, and Ming Zhou. Unicoder-vl: A universal encoder for vision and language by cross-modal pre-training. arXiv preprint arXiv:1908.06066, 2019. 2
[20] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014. 4
[21] Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In NeurIPS, 2019. 2, 4, 5, 6
[22] Tegan Maharaj, Nicolas Ballas, Anna Rohrbach, Aaron Courville, and Christopher Pal. A dataset and exploration of models for understanding video data through fill-in-the-blank question-answering. In CVPR, 2017. 8
[23] Amir Mazaheri, Dong Zhang, and Mubarak Shah. Video fill in the blank with merging lstms. arXiv preprint arXiv:1610.04062, 2016. 8
[24] Amir Mazaheri, Dong Zhang, and Mubarak Shah. Video fill in the blank using lr/rl lstms with spatial-temporal attentions. In ICCV, 2017. 8
[25] Antoine Miech, Ivan Laptev, and Josef Sivic. Learning a text-video embedding from incomplete and heterogeneous data. arXiv preprint arXiv:1804.02516, 2018. 2
[26] Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. Howto100m: Learning a text-video embedding by watching hundred million narrated video clips. In ICCV, 2019. 1, 2, 6, 7, 8
[27] Ishan Misra, C Lawrence Zitnick, and Martial Hebert. Shuffle and learn: unsupervised learning using temporal order verification. In ECCV, 2016. 1
[28] Yingwei Pan, Tao Mei, Ting Yao, Houqiang Li, and Yong Rui. Jointly modeling embedding and translation to bridge video and language. In CVPR, 2016. 2
[29] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In NeurIPS, 2015. 1, 4
[30] Alexander Richard, Hilde Kuehne, Ahsan Iqbal, and Juergen Gall. Neuralnetwork-viterbi: A framework for weakly supervised video learning. In CVPR, 2018. 7
[31] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 7
[32] Weijie Su, Xizhou Zhu, Yue Cao, Bin Li, Lewei Lu, Furu Wei, and Jifeng Dai. Vl-bert: Pre-training of generic visual-linguistic representations. arXiv preprint arXiv:1908.08530, 2019. 2
[33] Chen Sun, Austin Myers, Carl Vondrick, Kevin Murphy, and Cordelia Schmid. Videobert: A joint model for video and language representation learning. In ICCV, 2019. 1, 2, 5, 7
[34] Hao Tan and Mohit Bansal. Lxmert: Learning cross-modality encoder representations from transformers. arXiv preprint arXiv:1908.07490, 2019. 2
[35] Yansong Tang, Dajun Ding, Yongming Rao, Yu Zheng, Danyang Zhang, Lili Zhao, Jiwen Lu, and Jie Zhou. Coin: A large-scale dataset for comprehensive instructional video analysis. In CVPR, 2019. 6, 7
[36] Makarand Tapaswi, Yukun Zhu, Rainer Stiefelhagen, Antonio Torralba, Raquel Urtasun, and Sanja Fidler. Movieqa: Understanding stories in movies through question-answering. In CVPR, 2016. 2
[37] Atousa Torabi, Niket Tandon, and Leonid Sigal. Learning language-visual embedding for movie understanding with natural-language. arXiv preprint arXiv:1609.08124, 2016. 8
[38] Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. Learning spatiotemporal features with 3D convolutional networks. In ICCV, 2015. 1
[39] Du Tran, Heng Wang, Lorenzo Torresani, Jamie Ray, Yann LeCun, and Manohar Paluri. A closer look at spatiotemporal convolutions for action recognition. In CVPR, 2018. 6
[40] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, 2017. 2, 3, 4
[41] Xin Wang, Jiawei Wu, Da Zhang, Yu Su, and William Yang Wang. Learning to compose topic-aware mixture of experts for zero-shot video captioning. In AAAI, 2019. 2
[42] Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. Google's neural machine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144, 2016. 3
[43] Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu, and Kevin Murphy. Rethinking spatiotemporal feature learning: Speed-accuracy trade-offs in video classification. In ECCV, 2018. 7
[44] Jun Xu, Tao Mei, Ting Yao, and Yong Rui. Msr-vtt: A large video description dataset for bridging video and language. In CVPR, 2016. 6, 7
[45] Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, and Quoc V Le. Xlnet: Generalized autoregressive pretraining for language understanding. arXiv preprint arXiv:1906.08237, 2019. 1
[46] Li Yao, Atousa Torabi, Kyunghyun Cho, Nicolas Ballas, Christopher Pal, Hugo Larochelle, and Aaron Courville. Describing videos by exploiting temporal structure. In ICCV, pages 4507-4515, 2015. 2
[47] Youngjae Yu, Jongseok Kim, and Gunhee Kim. A joint sequence fusion model for video question answering and retrieval. In ECCV, 2018. 2, 7, 8
[48] Youngjae Yu, Hyungjin Ko, Jongwook Choi, and Gunhee Kim. Video captioning and retrieval models with semantic attention. arXiv preprint arXiv:1610.02947, 6(7), 2016. 8
[49] Youngjae Yu, Hyungjin Ko, Jongwook Choi, and Gunhee Kim. End-to-end concept word detection for video captioning, retrieval, and question answering. In CVPR, 2017. 8
[50] Luowei Zhou, Yannis Kalantidis, Xinlei Chen, Jason J Corso, and Marcus Rohrbach. Grounded video description. In CVPR, 2019. 2
[51] Luowei Zhou, Chenliang Xu, and Jason J Corso. Towards automatic learning of procedures from web instructional videos. In AAAI, 2018. 2, 6, 7
[52] Luowei Zhou, Yingbo Zhou, Jason J Corso, Richard Socher, and Caiming Xiong. End-to-end dense video captioning with masked transformer. In CVPR, 2018. 2, 7
[53] Linchao Zhu, Zhongwen Xu, and Yi Yang. Bidirectional multirate reconstruction for temporal modeling in videos. In CVPR, 2017. 2
[54] Linchao Zhu, Zhongwen Xu, Yi Yang, and Alexander G Hauptmann. Uncovering the temporal context for video question answering. IJCV, 124(3):409-421, 2017. 2
[55] Linchao Zhu and Yi Yang. Compound memory networks for few-shot video classification. In ECCV, 2018. 2
[56] Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhutdinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In ICCV, 2015. 6
[57] Dimitri Zhukov, Jean-Baptiste Alayrac, Ramazan Gokberk Cinbis, David Fouhey, Ivan Laptev, and Josef Sivic. Cross-task weakly supervised learning from instructional videos. In CVPR, 2019. 6, 8
|
actbertlearninggloballocalvideotextrepresentations/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53b0ef7742a09379d254e3a2cf8acbbb7b4ad6c058cb28102e18559706dfe500
+size 356459

actbertlearninggloballocalvideotextrepresentations/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f893d7b40ac482959e4373d5db3534e945450d7977414c8d1a9c694b8d02902c
+size 307139

actionbyteslearningfromtrimmedvideostolocalizeactions/1ce4d7f8-88e8-47ce-bc2a-d47ca88948f2_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b695384591aea973f7292ff2fe108bdea25369f2fbc0dde04b4c64f6aeabbb5d
+size 68925

actionbyteslearningfromtrimmedvideostolocalizeactions/1ce4d7f8-88e8-47ce-bc2a-d47ca88948f2_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:412b202814ebe033ce854efc14adc923ce8595a2446fc953ec93e7fa99db56b0
+size 84586

actionbyteslearningfromtrimmedvideostolocalizeactions/1ce4d7f8-88e8-47ce-bc2a-d47ca88948f2_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41dcf5cae15e1c3b5ab512dc14ac0fe096f5c750ce22716b991f0c68b37b7045
+size 1558878

actionbyteslearningfromtrimmedvideostolocalizeactions/full.md
ADDED
@@ -0,0 +1,260 @@
# ActionBytes: Learning from Trimmed Videos to Localize Actions

Mihir Jain$^{1*}$, Amir Ghodrati$^{1*}$, Cees G. M. Snoek$^{2}$

$^{1}$Qualcomm AI Research†, Qualcomm Technologies Netherlands B.V.

$^{2}$QUVA Lab, University of Amsterdam
mijain@qti.qualcomm.com, ghodrati@qti.qualcomm.com, cgmsnoek@uva.nl
# Abstract

This paper tackles the problem of localizing actions in long untrimmed videos. Different from existing works, which all use annotated untrimmed videos during training, we learn only from short trimmed videos. This enables learning from large-scale datasets originally designed for action classification. We propose a method to train an action localization network that segments a video into interpretable fragments, which we call ActionBytes. Our method jointly learns to cluster ActionBytes and trains the localization network using the cluster assignments as pseudo-labels. By doing so, we train on short trimmed videos that become untrimmed for ActionBytes. In isolation, or when merged, the ActionBytes also serve as effective action proposals. Experiments demonstrate that our boundary-guided training generalizes to unknown action classes and localizes actions in long videos of Thumos14, MultiThumos, and ActivityNet1.2. Furthermore, we show the advantage of ActionBytes for zero-shot localization as well as for traditional weakly supervised localization trained on long videos, achieving state-of-the-art results.
# 1. Introduction

The goal of this paper is to determine the start, the end, and the class of each action instance in a long untrimmed video. State-of-the-art approaches for action localization slide a trained model over an untrimmed video to produce classification score sequences over time [5, 18, 40]. They depend on start, end, and action class labels at training time. Weakly-supervised approaches [28, 32, 34] have demonstrated this even works when the long untrimmed training videos come with action class labels only. Different from all these works, we localize action instances in long untrimmed videos by learning from short trimmed videos labeled with just their action class.



Figure 1: From short, trimmed videos we learn to localize actions in long, untrimmed video. During training, our method jointly learns to generate pseudo-labels from ActionBytes, and to localize them in the short video. During testing, our localization model detects the instances of the query action class in the untrimmed video.

Short trimmed videos are highly popular and easy to access for action classification. Datasets in this domain come with a large number of samples and labels [3, 4, 8, 24]. Kinetics-700 [3], for example, has nearly 650k short trimmed video clips categorized into as many as 700 action classes. In this work, we leverage datasets commonly used for action classification, the what, for tackling the task of action localization, the when. This opens up opportunities for 1) learning from larger datasets with more action classes, and 2) localizing unknown classes by transferring knowledge between trimmed and untrimmed video datasets.

However, having just short trimmed videos during training provides virtually no scope to learn about action boundaries. To overcome this limitation, we adopt a self-supervised approach to regularize our network to learn boundary-aware models. Specifically, we use intermediate layers of a CNN model to decompose a trimmed video into multiple atomic actions called ActionBytes. From these we generate pseudo-labels to train a CNN to localize ActionBytes within videos. This model can be used to extract a new set of ActionBytes, so we iterate between updating ActionBytes and training the localization model using new pseudo-labels. Given a long test video, we slide our trained model over it to generate a classification score sequence for the query action, and thus localize its instances, see Figure 1.

We make three contributions in this paper. First, we define What2When, the task of localizing actions in long untrimmed videos using short trimmed videos commonly used for action classification. Second, we introduce ActionBytes: interpretable, temporally scale-invariant fragments of videos capable of spotting parts of an action. Third, we propose an iterative approach for training boundary-aware models from short videos. We experimentally show the effectiveness of our method on Thumos14 [10], MultiThumos [38] and ActivityNet [1]. Since our approach transfers action class knowledge from trimmed videos to untrimmed videos with unseen classes, it is a natural fit for zero-shot applications. We evaluate our model in a zero-shot scenario where the label sets of the short trimmed training videos and the long untrimmed test videos are disjoint. Finally, we conduct experiments on the task of weakly supervised action localization. Although our method is not designed for learning from long videos, we show the benefit of ActionBytes as action proposals in obtaining favorable performance compared to the state-of-the-art.
# 2. Related work

The problem of learning from short videos to localize actions in long videos relates to multiple recognition tasks in videos.

Mid-level representation. Several works have proposed methods to automatically discover mid-level representations by segmenting an action into atomic actions [9, 15, 33]. Lan et al. [15] discover mid-level action elements by clustering spatio-temporal segments, which is done on a per-class basis. In [6, 9] the authors automatically obtain meaningful action fragments, but they require temporal action annotations to do so. Alternatively, [39] also uses parts of actions but exploits their temporal order. Unlike all the above methods, our ActionBytes are class-agnostic, which makes them suitable for knowledge transfer to videos of unseen classes.

Pseudo-labeling. Recently, self-supervised approaches have been proposed for pseudo-labeling data in representation learning [2], label propagation in semi-supervised learning [11], and semantic segmentation [16]. This line of work relies on clustering to create pseudo-labels from unlabeled data. We also generate pseudo-labels per video during training, but for a different purpose: we use them to regularize our localization model to be sensitive to boundaries.

Self-training. Our approach can also be considered a self-training procedure applied to the video domain, and adapted for localization in the What2When task. It differs from other self-training approaches [17, 29, 42] in many ways, but mainly because the pseudo-labels are generated at the sub-video level and are regularized for localization.

Weakly supervised. In recent times, there has been increased interest in developing models that can be trained with weaker forms of supervision, such as video-level labels. UntrimmedNets [34] and STPN [26] formulated weakly supervised action localization as a multiple instance learning problem, with attention to locate the actions in videos. AutoLoc [32] introduced a boundary predictor built on an Outer-Inner-Contrastive loss. W-TALC [28] introduced a co-activity similarity loss that looks for similar temporal regions in a pair of videos containing a common action class. Nguyen et al. [27] propose to model both foreground and background, while [39] exploits temporal relations among video segments. All these methods depend on the presence of multiple actions in long videos to learn to discriminate foreground action from background. Differently, we propose a method to learn action boundaries from short videos through our ActionByte mining.

Zero-shot learning. Many approaches for zero-shot and few-shot learning focus on intra-dataset splits between seen and unseen classes [7, 14, 19, 37], while others attempt cross-dataset action recognition [12, 20, 41], some of which learn only from the image domain to recognize actions in videos [12, 20]. To avoid the use of common classes across datasets, Roitberg et al. [30] present an evaluation that filters very similar classes between source and target. The common practice in zero-shot learning is to transfer action class knowledge through a semantic embedding space, such as attributes, word vectors or visual features. Among these, word vectors have been preferred as only category names are required for constructing the semantic embedding space. In this paper, we also employ word embeddings to map source classes to target classes while precisely following the zero-shot regime.
# 3. Method

In this section, we explain our proposed method, which learns from short trimmed videos to temporally localize actions in long untrimmed videos. We first formally define the problem of What2When action localization. Then, we explain our method, illustrated in Figure 2, and its components. We start by introducing ActionBytes, the basic building block of our method, and explain how to extract them from videos. Next, we describe our two-step iterative pipeline that leverages ActionBytes to train localization models on short videos in a self-training fashion. Finally, we discuss the potential of ActionBytes by themselves as action proposals in the video localization context.

Problem statement. Given a long test video, we aim to predict a set of action categories present in that video, together with their start and end time. During training, a set of $n$ short, single-action videos $\chi^{short} = \{x_i\}_{i=1}^n$ is given, where each video $x$ has a single label $c$, belonging to label set $C_{short} = \{c_i\}_{i=1}^{n_c}$. During testing, a set of long untrimmed videos $\chi^{long} = \{x_i'\}_{i=1}^{n'}$ is given, where for each video $x'$ the goal is to find the boundary of all action instances and predict their category labels, $c'$, from the label set $C_{long} = \{c_i'\}_{i=1}^{n_c'}$. In this paper, unless explicitly stated otherwise, we train on $\chi^{short}$ and evaluate on $\chi^{long}$.



Figure 2: The proposed mining pipeline segments a video into ActionBytes. These are then clustered and assigned pseudo-labels, which are used as a supervision signal to train the localization model. Action class labels are from $C_{short}$.
# 3.1. ActionBytes

It is well known that high-level features of consecutive frames, extracted from a CNN, usually vary smoothly over time [13, 35]. Therefore, any abrupt change in feature space can represent a high-level change in the pixel space. We leverage this property to segment videos into interpretable fragments, which we call ActionBytes.

Suppose $F = \{\pmb{f}_t\}_{t=1}^T$ are $d$-dimensional features extracted using a deep model for each time instant $t$, where $T$ is the temporal sequence length. We learn to map these features to a latent space using a latent projection module. The output of the latent projection module, $L \in \mathbb{R}^{l \times T}$, keeps the affinity to $l$ latent concepts for each time instant (Figure 3). For a given video, we find ActionByte boundaries $B$ by looking for time instants where the affinities to the latent concepts change abruptly compared to the previous time instant:
$$
B = \{\, t \mid \sum_{i=1}^{l} | L[i, t] - L[i, t-1] | > \tau \,\} \tag{1}
$$
where $\tau$ is set to the $p^{th}$ percentile, so the number of ActionBytes in a video is directly proportional to its length $T$. In general, the $p^{th}$ percentile leads to $T \times \frac{100 - p}{100}$ ActionBytes. The length of each of them varies with the video content, with an average length equal to $\frac{100}{100 - p}$.

Each boundary in the set $B$ starts an ActionByte, $A_{i} = (B_{i}, B_{i+1} - 1)$, resulting in $|B| - 1$ ActionBytes. Such boundaries are obtained in a class-agnostic way, but they segment a video into interpretable fragments. These ActionBytes are temporally scale-invariant as their lengths adapt to the video content. For example, a single ActionByte can capture an atomic action regardless of the action speed. Example ActionBytes are shown in Figure 4.
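For illustration, a minimal NumPy sketch of this boundary extraction, assuming a precomputed affinity matrix `L`; the helper name and the inclusion of the first and last time steps are our own choices, not spelled out in the paper:

```python
import numpy as np

def actionbyte_boundaries(L, p=50):
    """Segment a video into ActionBytes following Eq. 1.

    L: (l, T) array of latent-concept affinities per time instant.
    p: percentile defining the threshold tau; a higher p gives fewer,
       longer ActionBytes.
    """
    # L1 change of the latent affinities between consecutive time steps.
    change = np.abs(L[:, 1:] - L[:, :-1]).sum(axis=0)  # shape (T - 1,)
    tau = np.percentile(change, p)
    # Time instants whose affinity change exceeds tau start a new ActionByte.
    starts = np.flatnonzero(change > tau) + 1
    # Treat the first time step as a start and close the final ActionByte.
    return np.unique(np.concatenate(([0], starts, [L.shape[1]])))

# Consecutive boundary pairs give the ActionBytes A_i = (B_i, B_{i+1} - 1).
```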


Figure 3: Localization model and ActionByte extraction. The localization model is trained with classification and localization losses on pseudo-labels. The latent output $L$ is used to extract ActionBytes. Class labels are from $C_{short}$.

# 3.2. Mining ActionBytes
Next, we discuss how we learn a model from short videos. One could train a classification model on short videos and slide it over long test videos. However, such a model is agnostic to boundaries within the short videos, and might not generate good class activation scores for localization. Here, we leverage ActionBytes to train a discriminative, boundary-aware model from short videos. This is done by decomposing a video into multiple ActionBytes, from which we generate pseudo-labels to train our model.

The proposed pipeline for mining ActionBytes is shown in Figure 2. It iterates between two steps: generating pseudo-labels from ActionBytes and training the localization model with these pseudo-labels. For the creation of the pseudo-labels we take inspiration from Caron et al. [2]. We first extract $N$ ActionBytes from a set of training videos and represent each of them by averaging the latent features within its boundaries. Next, we group all the ActionBytes into $K$ clusters with the $k$-means algorithm by solving
$$
\min_{C \in \mathbb{R}^{l \times K}} \frac{1}{N} \sum_{n=1}^{N} \min_{y_n \in \{0, 1\}^{K}} \| a_n - C y_n \|_2^2
$$

|
| 83 |
+
Figure 4: Extracted ActionBytes, highlighted in different colors, for two examples of Baseball Pitch. The ActionBytes capture the action in four parts that are interpretable as (1) 'get into wind-up position' (red), (2) 'loading to deliver' (blue), (3) 'delivery' (pink) and (4) 'follow-through' (green). ActionBytes are scale-invariant and can adapt to varying temporal scale, e.g., the 'follow-through' extends to different number of snippets in the two examples.
|
| 84 |
+
|
| 85 |
+
where $a_{n}$ is feature vector obtained from ActionByte $n$ . Solving this problem provides a centroid matrix $C$ that is used to assign a cluster id to each ActionByte in a video. Finally, the pseudo-label vector for a video is defined as all the cluster ids assigned to ActionBytes of that video.
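As an illustration, this step could look as follows with scikit-learn's k-means standing in for the clustering; the function and variable names are ours, and $K = 500$ follows the ablation in Section 4.1:

```python
import numpy as np
from sklearn.cluster import KMeans

def video_pseudo_labels(actionbyte_feats, video_ids, K=500):
    """Cluster ActionByte features and derive per-video pseudo-labels.

    actionbyte_feats: (N, l) array with one averaged latent feature a_n
                      per ActionByte.
    video_ids: length-N sequence mapping each ActionByte to its video.
    Returns a dict {video_id: sorted list of cluster ids in that video}.
    """
    kmeans = KMeans(n_clusters=K, n_init=10).fit(actionbyte_feats)
    labels = {}
    for vid, cid in zip(video_ids, kmeans.labels_):
        labels.setdefault(vid, set()).add(int(cid))
    return {vid: sorted(cids) for vid, cids in labels.items()}
```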
Having obtained multiple pseudo-labels for each training video, we update the parameters of the localization network in the second step for classifying and localizing ActionBytes in the video (shown in Figure 3). Such training leads to a better representation of the latent concepts, $L$, of the model, which in turn results in a better set of ActionBytes. Therefore, we iterate over these two steps of extracting ActionBytes and training the localization model. This approach can be seen as a regularization technique: by training the model with pseudo-labels, we avoid the risk of overfitting the model to class labels.

Localization model. Our full localization model, used in the second step of our pipeline, is shown in Figure 3. The role of this model is to learn to classify and localize ActionBytes into the assigned pseudo-labels. This is reminiscent of a model for weakly-supervised temporal localization, where each video has multiple instances of actions and temporal annotations are not available. With this motivation, we now describe our localization model.
We first extract features $F = \{\pmb{f}_t\}_{t=1}^T$ from a pretrained deep network, where $d$ is the feature dimension and $T$ is the temporal sequence length. We pass the extracted features to a latent projection module to map them to a set of latent concepts, from which we extract ActionBytes. For the latent projection module, we simply use a fully connected layer followed by a ReLU [25]:
$$
L = \mathrm{ReLU}(W_{proj} F)
$$
where $W_{proj} \in \mathbb{R}^{l \times d}$ is the latent projection matrix and $l$ is the number of latent concepts. The output of the latent projection layer, $L$, is passed through a linear classifier to obtain activation scores over time for the pseudo-classes. On these activation sequences, following [28], we apply a $k$-max multiple-instance learning (MIL) loss for classification and a co-activity similarity loss for localization. For the $k$-max MIL loss, the prediction score of a class is computed as the average of its $k$ largest activations over the temporal dimension. The co-activity similarity loss is computed over the class activation sequences and $L$. For a given video and class, a vector of similarities between the class activation sequence and each row of $L$ (i.e., each latent concept) is computed. A pair of videos with a common class label should have higher similarities with the same latent concepts. This is what the loss enforces, which makes it a suitable localization loss in our method.
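To make this concrete, here is a small PyTorch sketch of the latent projection, linear classifier, and $k$-max MIL pooling; the module name and the feature and latent dimensions are illustrative assumptions, and the co-activity similarity loss of [28] is omitted:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class LatentMILModel(nn.Module):
    """Latent projection followed by a linear classifier with
    k-max multiple-instance-learning pooling."""

    def __init__(self, d=2048, l=1024, num_classes=500):
        super().__init__()
        self.proj = nn.Linear(d, l)            # W_proj
        self.classifier = nn.Linear(l, num_classes)

    def forward(self, feats):
        # feats: (T, d) snippet features of one video.
        L = F.relu(self.proj(feats))           # (T, l) latent concepts
        scores = self.classifier(L)            # (T, num_classes) activations
        k = max(1, feats.shape[0] // 8)        # k = T / 8, as in Section 4
        video_scores = scores.topk(k, dim=0).values.mean(dim=0)
        return video_scores, scores, L
```

The video-level scores would then enter a multi-label classification loss against the pseudo-labels, while the co-activity similarity loss acts on `scores` and `L`.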
Using this model in our mining, we get predictions for the pseudo-labels. In order to translate these into predictions for the training classes, $C_{short}$, we add a transfer layer on top of the linear classifier. This is an FC layer, again learned with a $k$-max MIL loss, but using the class labels (see Figure 3). For localization at test time, we follow the two-stage thresholding scheme of [28] on the output of the transfer layer.

Knowledge transfer. In cross-dataset evaluation, the label set of the seen short videos, $C_{short}$, can be different from the label set of the unseen long videos, $C_{long}$. For knowledge transfer in such cases, we follow Objects2Action [12]. We employ the skip-gram model of word2vec [21, 22] as a semantic embedding function to embed each word of a given class label as a vector. For multi-word class labels, we take the average vector of the embedded words [12, 23] to represent the label. The affinities between class labels from $C_{short}$ and $C_{long}$ are computed by cosine similarity between their embeddings. Thus, the class activation scores for $C_{short}$ are transferred to those for $C_{long}$.
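A sketch of this score transfer, assuming precomputed word2vec embeddings for the source and target label sets (the function and argument names are ours):

```python
import numpy as np

def transfer_scores(scores_short, emb_short, emb_long):
    """Transfer class activation scores from C_short to C_long.

    scores_short: (T, n_short) activation sequence over source classes.
    emb_short: (n_short, dim) word2vec embeddings of the source labels
               (multi-word labels averaged).
    emb_long: (n_long, dim) embeddings of the target labels.
    """
    def unit(e):
        return e / np.linalg.norm(e, axis=1, keepdims=True)
    affinity = unit(emb_short) @ unit(emb_long).T   # cosine similarities
    return scores_short @ affinity                  # (T, n_long)
```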
The two sets of class labels, though different, may have some overlap. To evaluate in a pure zero-shot localization set-up, we also conduct an experiment where training is done on a subset of $C_{short}$, such that this subset does not overlap with the test label set $C_{long}$.

# 3.3. Action proposals from ActionBytes

Segmenting video into ActionBytes is critical to learn a reliable localization model from short videos. In addition to this, an ActionByte by itself is also suited for action localization as an informative action unit. We show how ActionBytes can be used to form action proposals in long videos during testing. Consequently, we also demonstrate that the utility of ActionBytes is not limited to the What2When set-up but also extends to the weakly-supervised set-up.
Since an ActionByte represents an interpretable part of an action, one or more ActionBytes together form a good action proposal. For a given test video, we generate action proposals, $P_{AB}$, by merging $m \in M$ ActionBytes, where the set $M$ contains the numbers of ActionBytes to be merged:
$$
P_{AB} = \bigcup_{m \in M} \bigcup_{i=1}^{|B| - m} \left( B_i, B_{i+m} - 1 \right) \tag{2}
$$
where $B_{i}$ is the start of ActionByte $i$, so $(B_{i}, B_{i+m} - 1)$ is an action proposal from $B_{i}$ to $B_{i+m} - 1$. Each of these proposals is temporally jittered to include up to one neighboring time-step. This ensures that the immediate neighborhood of the boundaries is included in the action proposals.
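A small sketch of this proposal generation, under our reading that jittering adds a variant of each proposal expanded by one time step (the paper does not spell out the exact jittering scheme):

```python
def actionbyte_proposals(B, M=(1, 2), jitter=1):
    """Merge consecutive ActionBytes into action proposals (Eq. 2).

    B: sorted ActionByte boundary indices.
    M: numbers of consecutive ActionBytes to merge; {1, 2} in the paper.
    """
    proposals = set()
    for m in M:
        for i in range(len(B) - m):
            start, end = B[i], B[i + m] - 1
            proposals.add((start, end))
            # Temporal jittering: include up to one neighboring time step.
            proposals.add((max(0, start - jitter), end + jitter))
    return sorted(proposals)
```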
ActionBytes for weakly-supervised localization. Weakly-supervised action localization is a popular task where training and testing are done on long videos, i.e., $C_{short} = C_{long}$. The ActionByte mining explained in Section 3.2 is critical to learn from short videos. But when learning on long videos in a weakly-supervised set-up, generating pseudo-labels is not needed, as the long videos are already untrimmed w.r.t. the actual action labels. Therefore, the localization model alone, without the transfer layer, is enough to learn good-quality classification score sequences and ActionBytes.

# 4. Experiments
In this section, we first describe the datasets we train and evaluate our proposed method on, followed by the implementation details. We then present an ablation study of our method and compare our model with baselines in the What2When setup. We also conduct an experiment in a zero-shot setup and compare our model with the state-of-the-art models in the weakly-supervised regime.

Datasets. We use the validation set of Kinetics-400 [4] for training our model. It contains 17,281 trimmed single-action videos belonging to 400 action classes, with a maximum length of 10 seconds. For evaluation, we report on the untrimmed Thumos14 [10], MultiThumos [38] and ActivityNet1.2 [1]. Thumos14 contains 200 validation videos and 212 test videos with temporal annotations belonging to 20 action classes, with about 15.5 action instances per video on average. The videos in this dataset are on average 212 seconds long. MultiThumos has the same set of videos as Thumos14, but extends the latter from 20 action classes with 0.3 labels per frame to 65 classes with 1.5 labels per frame. Also, the average number of distinct action classes in a video is 10.5 (compared to 1.1 in Thumos14), making it a more challenging multi-label dataset. ActivityNet1.2 has 4,819 videos for training and 2,383 videos for validation, which in the literature is used for evaluation. It has 100 classes, with on average 1.5 action instances per video. The videos in this dataset are on average 115 seconds long.

Implementation details. As a base network we use I3D [4] pretrained on Kinetics-400. We extract RGB and flow features from the last average-pooled layer (1024 dimensions for each stream). We use TV-L1 to compute optical flow. Features are extracted from non-overlapping 16-frame chunks of video. We do not finetune the feature extractors. The network is implemented in PyTorch and trained with the Adam optimizer with a learning rate of 0.001. We initialize the localization model by training on the validation set of the Kinetics-400 dataset. For the $k$-max MIL loss, we set $k$ to $1/8$ of the length of the video. In all the experiments, we iterate over our pipeline for 3 iterations. The value of the $p$ percentile (which sets $\tau$ in Eq. 1) determines how many ActionBytes are extracted from a given video. For Thumos14 and MultiThumos we set $p = 50$, and for ActivityNet1.2 we use $p \in \{92, 95, 97.5, 99, 99.5\}$. In all the experiments we set $M = \{1, 2\}$ in Eq. 2. We report the commonly used mean Average Precision (mAP) metric at snippet-level granularity for evaluating detections. For the weakly-supervised setup, the experiment settings are kept similar to [28].
Localization at test time. At test time, we use our trained model to generate class-activation sequences over the untrimmed test video. We follow the two-stage thresholding scheme of [28] for localizing actions. The first threshold filters out classes whose confidence score is less than the mean confidence score. The second threshold is applied along the temporal axis to obtain the detections. When ActionByte proposals are added, non-maximum suppression is also applied.
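A rough sketch of this two-stage scheme; the exact thresholds follow [28], so the temporal threshold below is a placeholder assumption:

```python
import numpy as np

def two_stage_localization(scores, temporal_threshold=0.5):
    """Two-stage thresholding over a (T, num_classes) activation sequence.

    Stage 1 keeps classes scoring at least the mean class confidence;
    stage 2 thresholds each kept class along the temporal axis and
    groups consecutive active snippets into detections.
    """
    detections = []
    class_conf = scores.mean(axis=0)            # per-class video confidence
    for c in np.flatnonzero(class_conf >= class_conf.mean()):
        active = scores[:, c] > temporal_threshold
        t = 0
        while t < len(active):
            if active[t]:
                start = t
                while t < len(active) and active[t]:
                    t += 1
                detections.append((start, t - 1, int(c)))  # (start, end, class)
            else:
                t += 1
    return detections
```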
# 4.1. Ablation study

In the ablation, we test on untrimmed Thumos14 and train on the validation set of the trimmed Kinetics-400 dataset.

Fixed length versus scale-invariant ActionBytes. First, we evaluate the effect of ActionBytes. We run two setups: the first uses fixed-size segments, uniformly sampled along the video, and the second uses our automatically extracted ActionByte boundaries. For the first setup we uniformly segment the video into chunks of two snippets, to make it comparable with the average length of ActionBytes. The final localization performance at $IoU = 0.5$ is $14.1\%$ for fixed-size segments and $15.5\%$ for ActionBytes. Automatically extracted ActionByte boundaries are thus preferred over uniformly sampled boundaries.

Influence of number of clusters. Next, we evaluate the influence of the number of clusters used for generating pseudo-labels on the final localization performance. Figure 5 shows that the performance increases with the number of clusters up to 500 and then decreases. This makes sense, as with a large number of clusters an ActionByte might not be represented by a single cluster centroid. Therefore, in all the experiments we fix the number of clusters to 500.



Figure 5: Influence of the number of clusters on localization performance. The performance increases up to 500 clusters and decreases afterward, as over-granular clusters might not be able to represent a single ActionByte.
Number of mining iterations. In Figure 6 (left), we show how performance changes over training iterations. It increases up to a point, and then decreases slightly. This is mainly because, after a few epochs, our iterative mining reaches an equilibrium point where the clustering loss stops decreasing (see Figure 6 (right)) and the model converges to an optimum.



Figure 6: Iterative mining. (Left) Action localization mAP over mining iterations. Performance increases as long as the clustering loss (right) decreases; then both saturate.

ActionBytes as proposals. As explained in Section 3.3, ActionBytes, when merged together, can act as action proposals. In this ablation, we show how the number of merged ActionBytes influences localization performance. As shown in Figure 7, using single ActionByte proposals ($M = \{1\}$) improves the performance by more than $3\%$ compared to not using ActionByte proposals. This shows the effectiveness of ActionBytes as proposals. Merging up to 4 ActionBytes ($M = \{1,2,3,4\}$) improves localization performance further. However, it comes with the cost of processing more proposals. To keep the balance between computational cost and performance, we set $M = \{1,2\}$ in the remaining experiments. Since the ActionBytes vary in length, the proposal length also varies. This is reminiscent of commonly used anchor lengths [32]. The proposal length, for the chosen $M$ and $p$, ranges from 1 to 70 for Thumos14/MultiThumos and from 6 to 369 for ActivityNet.



Figure 7: ActionBytes as proposals for localization. Single ActionByte proposals ($M = \{1\}$) improve mAP compared to not using ActionByte proposals. We set $M = \{1,2\}$ in all the experiments as adding more proposals increases the computational cost while bringing marginal improvement.
# 4.2. What2When action localization

In the What2When action localization experiments, we show the benefit of our mined ActionBytes compared to the baseline. For training, we use the validation set of the Kinetics-400 dataset. For evaluation, we follow the common protocol from the literature and evaluate on the test sets of Thumos14 and MultiThumos, and the validation set of ActivityNet1.2. Baseline is the localization model trained on the Kinetics-400 validation set, without ActionBytes and iterative training. This model generates confidence scores for 400 classes over untrimmed long videos. We then transfer the class scores to the target classes as explained in Section 3.2, and localize actions using the two-stage thresholding. Ours is our proposed deep mining method, which is similar to the baseline (and trained on the same dataset) except that we use pseudo-labels during training to regularize the model. For a fair comparison, we keep all the hyper-parameters fixed during evaluation. Finally, for Ours (+ Proposals) we add ActionByte proposals to the pool of proposals during localization.

Table 1: What2When action localization performance on Thumos14, ActivityNet1.2 and MultiThumos.

<table><tr><td rowspan="2"></td><td colspan="4">Thumos14</td><td colspan="4">ActivityNet1.2</td><td colspan="4">MultiThumos</td></tr><tr><td>0.3</td><td>0.4</td><td>0.5</td><td>0.7</td><td>0.3</td><td>0.4</td><td>0.5</td><td>0.7</td><td>0.3</td><td>0.4</td><td>0.5</td><td>0.7</td></tr><tr><td>Baseline</td><td>18.8</td><td>12.7</td><td>8.4</td><td>1.7</td><td>24.0</td><td>21.7</td><td>19.4</td><td>8.0</td><td>7.5</td><td>4.9</td><td>3.2</td><td>0.6</td></tr><tr><td>Ours</td><td>21.1</td><td>15.6</td><td>11.3</td><td>2.8</td><td>24.4</td><td>22.4</td><td>20.1</td><td>8.2</td><td>8.1</td><td>5.7</td><td>4.1</td><td>1.0</td></tr><tr><td>Ours (+ Proposals)</td><td>26.1</td><td>20.3</td><td>15.5</td><td>3.7</td><td>24.7</td><td>22.7</td><td>20.3</td><td>8.3</td><td>10.8</td><td>8.1</td><td>6.1</td><td>1.4</td></tr></table>

Table 2: Zero-shot action localization performance on Thumos14 and MultiThumos in the What2When setup.

<table><tr><td></td><td>0.1</td><td>0.2</td><td>0.3</td><td>0.4</td><td>0.5</td></tr><tr><td>Thumos14</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Baseline</td><td>13.8</td><td>11.1</td><td>7.1</td><td>4.7</td><td>3.1</td></tr><tr><td>Ours</td><td>14.9</td><td>12.6</td><td>8.5</td><td>6.1</td><td>4.1</td></tr><tr><td>Ours (+ Proposals)</td><td>17.8</td><td>15.5</td><td>11.3</td><td>8.7</td><td>6.3</td></tr><tr><td>MultiThumos</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Baseline</td><td>6.4</td><td>5.1</td><td>3.1</td><td>2.0</td><td>1.3</td></tr><tr><td>Ours</td><td>7.0</td><td>5.7</td><td>3.7</td><td>2.5</td><td>1.7</td></tr><tr><td>Ours (+ Proposals)</td><td>9.4</td><td>8.0</td><td>5.6</td><td>4.1</td><td>3.0</td></tr></table>

As shown in Table 1, the baseline performance on the Thumos14 dataset at $IoU = 0.5$ is $8.4\%$, which shows the difficulty of the task. Using our model, the performance increases to $11.3\%$. This is interesting, considering that the state-of-the-art performance on this dataset in the weakly-supervised regime, where training and testing are done on the same dataset, is just $26.5\%$ [27] (see Table 3). Finally, by adding ActionByte proposals, the performance increases to $15.5\%$, i.e., an $84\%$ relative improvement overall. This also shows the effectiveness of our ActionBytes as proposals, which is mainly due to their complementary nature to the baseline proposals. The improvements are obtained across the IoUs, especially for the higher ones.

For ActivityNet1.2 the baseline obtains an mAP of $19.4\%$ at $IoU = 0.5$, while our full model reaches $20.3\%$. The gains are smaller than on Thumos14 but consistent across the IoUs. The reduced gains can be attributed to the nature of the temporal annotations, which merge several nearby action instances and in-between pauses into one instance. This leads to extra false positives, as ActionByte proposals do well at separating actions from temporal context.

For MultiThumos the trend is similar to Thumos14: mining and then ActionByte proposals consistently improve performance across the IoU thresholds. It is promising that the proposed method maintains its gain on this more challenging multi-label dataset.
# 4.3. Zero-shot action localization

For this set of experiments, we have a setup similar to the previous What2When experiment, except that we adhere to a zero-shot premise and exclude common classes between the source Kinetics-400 dataset and the target datasets. Thus, during training, we exclude 18 classes of Kinetics-400 for Thumos14/MultiThumos. Similarly, 72 classes of Kinetics-400 are excluded for ActivityNet1.2. The remaining classes are semantically very different from those of ActivityNet1.2, resulting in a much lower baseline mAP of $2.6\%$ at $IoU = 0.3$ compared to $24.0\%$ in the What2When experiment. As ActivityNet1.2 is not suitable for zero-shot transfer from Kinetics-400, we evaluate on the other two datasets in Table 2. Compared to the What2When results there is a drop in performance, which is expected considering the difficulty of the task. However, the same trend is maintained: our mining model performs better than the baseline, and adding ActionByte proposals further improves the localization performance. Again, we observe considerable gains over the baseline for both Thumos14 and MultiThumos, leading to consistent improvement across the IoUs. We believe these are the first zero-shot temporal localization results reported on Thumos14 and MultiThumos.

Table 3: Weakly-supervised localization on the Thumos14 dataset. (*) indicates I3D features.

<table><tr><td></td><td>0.3</td><td>0.4</td><td>0.5</td><td>0.7</td></tr><tr><td colspan="5">Strong supervision</td></tr><tr><td>Shou et al. [31]</td><td>40.1</td><td>29.4</td><td>23.3</td><td>7.9</td></tr><tr><td>Xu et al. [36]</td><td>44.8</td><td>35.6</td><td>28.9</td><td>-</td></tr><tr><td>Zhao et al. [40]</td><td>50.6</td><td>40.8</td><td>29.1</td><td>-</td></tr><tr><td>Chao et al. * [5]</td><td>53.2</td><td>48.5</td><td>42.8</td><td>20.8</td></tr><tr><td colspan="5">Weak supervision</td></tr><tr><td>Nguyen et al. * [26]</td><td>35.5</td><td>25.8</td><td>16.9</td><td>4.3</td></tr><tr><td>Shou et al. [32]</td><td>35.8</td><td>29.0</td><td>21.2</td><td>5.8</td></tr><tr><td>Paul et al. * [28]</td><td>40.1</td><td>31.1</td><td>22.8</td><td>7.6</td></tr><tr><td>Yu et al. * [39]</td><td>39.5</td><td>-</td><td>24.5</td><td>7.1</td></tr><tr><td>Nguyen et al. * [27]</td><td>46.6</td><td>37.5</td><td>26.5</td><td>9.0</td></tr><tr><td>Ours* (Proposals)</td><td>43.0</td><td>35.8</td><td>29.0</td><td>9.5</td></tr></table>
# 4.4. Comparison with the state-of-the-art

Here, we demonstrate the effectiveness of our ActionByte proposals in the weakly-supervised setup as explained in Section 3.3. We employ the off-the-shelf model of Paul et al. [28] as baseline and add ActionByte proposals on top of it. For the Thumos14 dataset, we train the model on the validation set and evaluate on the test set. As before, we use the IoU between detections and ground-truth as the evaluation metric. As shown in Table 3, our method outperforms the state-of-the-art for the higher overlap thresholds. Our improvement is particularly notable at $IoU = 0.5$, where we improve the state-of-the-art by a margin of $2.4\%$. This validates that our ActionByte proposals are suitable for both the What2When and weakly supervised tasks. In Table 4, results on ActivityNet1.2 are reported. We outperform the state-of-the-art for all IoUs except 0.7. In Table 5, we report results for MultiThumos. To our knowledge, the only video-level localization results reported on MultiThumos are by Yeung et al. [38]. While they report $32.4\%$ at $IoU = 0.1$ with frame-level supervision, we reach this mAP with weak supervision only. To the best of our knowledge, this is the first weakly-supervised evaluation on MultiThumos. We also evaluate our baseline [28] on this dataset and consistently improve over it across the IoU thresholds. In summary, our method improves over the baselines and achieves promising results on all three datasets. This shows the effectiveness of the ActionByte proposals. We show some qualitative results of our detections in Figure 8.



Figure 8: Qualitative results showing top localizations on sample videos from Soccer Penalty and Basketball Dunk. Frames representing action instances are highlighted by orange boxes and those for the background by blue boxes. Below these frames, the ground-truth is plotted in red against time in seconds. Localization boundaries are shown in other colors for the baseline detections as well as the detections using the ActionByte proposals. In the Soccer Penalty example, there is only one true positive, which is missed by the baseline, while it is covered by several of our proposals, one of which detects it. Both methods have false positives. The second example, Basketball Dunk, is a video longer than 10 minutes with many action instances. Out of the 16 instances shown, our approach localizes 6 while producing 3 false positives at $IoU = 0.5$. Two of these false positives are duplicate detections (in cyan near 620s and 650s). The baseline localizes two action instances with one false positive. There are a few false positives and missed detections by our approach, but it localizes some very difficult action instances. Figure best viewed in color.
# 5. Conclusions

We introduced the new task of learning from short trimmed videos to localize actions in long untrimmed videos. To tackle the new task, our proposed pipeline is jointly trained to segment the videos into ActionBytes and localize them in the short video. Our method can be considered a technique to regularize action boundaries during training. Experiments on the three datasets show the effectiveness of our method not only for the proposed task, but also for zero-shot action localization and weakly supervised action localization. This demonstrates the adaptability of the models trained by our method, as we considerably improve over the baselines and achieve state-of-the-art results.

Table 4: Weakly-supervised localization on the ActivityNet1.2 dataset. (*) indicates I3D features.

<table><tr><td></td><td>0.3</td><td>0.4</td><td>0.5</td><td>0.7</td></tr><tr><td>Wang et al. [34]</td><td>-</td><td>-</td><td>7.4</td><td>3.9</td></tr><tr><td>Shou et al. [32]</td><td>-</td><td>-</td><td>27.3</td><td>17.5</td></tr><tr><td>Paul et al. * [28]</td><td>45.5</td><td>41.6</td><td>37.0</td><td>14.6</td></tr><tr><td>Yu et al. * [39]</td><td>-</td><td>-</td><td>28.3</td><td>18.9</td></tr><tr><td>Ours* (Proposals)</td><td>47.8</td><td>44.0</td><td>39.4</td><td>15.4</td></tr></table>

Table 5: Weakly-supervised localization on the MultiThumos dataset. (*) indicates I3D features. †Our evaluation of [28].

<table><tr><td></td><td>0.1</td><td>0.2</td><td>0.3</td><td>0.4</td><td>0.5</td></tr><tr><td>Strong supervision</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Yeung et al. [38]</td><td>32.4</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Weak supervision</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Paul et al. *†</td><td>30.7</td><td>24.0</td><td>17.1</td><td>12.6</td><td>8.9</td></tr><tr><td>Ours* (Proposals)</td><td>32.4</td><td>26.8</td><td>20.5</td><td>15.7</td><td>12.1</td></tr></table>

# References
[1] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. ActivityNet: A large-scale video benchmark for human activity understanding. In CVPR, 2015. 2, 5
[2] Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze. Deep clustering for unsupervised learning of visual features. In ECCV, 2018. 2, 3
[3] Joao Carreira, Eric Noland, Chloe Hillier, and Andrew Zisserman. A short note on the kinetics-700 human action dataset. arXiv preprint arXiv:1907.06987, 2019. 1
[4] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? A new model and the kinetics dataset. In CVPR, 2017. 1, 5
[5] Yu-Wei Chao, Sudheendra Vijayanarasimhan, Bryan Seybold, David A Ross, Jia Deng, and Rahul Sukthankar. Rethinking the faster R-CNN architecture for temporal action localization. In CVPR, 2018. 1, 7
[6] Adrien Gaidon, Zaid Harchaoui, and Cordelia Schmid. Temporal localization of actions with actons. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(11):2782-2795, 2013. 2
[7] Chuang Gan, Tianbao Yang, and Boqing Gong. Learning attributes equals multi-source domain generalization. In CVPR, 2016. 2
[8] Raghav Goyal, Samira Ebrahimi Kahou, Vincent Michalski, Joanna Materzynska, Susanne Westphal, Heuna Kim, Valentin Haenel, Ingo Fruend, Peter Yianilos, Moritz Mueller-Freitag, Florian Hoppe, Christian Thurau, Ingo Bax, and Roland Memisevic. The “something something” video database for learning and evaluating visual common sense. In ICCV, 2017. 1
[9] Rui Hou, Mubarak Shah, and Rahul Sukthankar. Real-time temporal action localization in untrimmed videos by sub-action discovery. In BMVC, 2017. 2
[10] Haroon Idrees, Amir R Zamir, Yu-Gang Jiang, Alex Gorban, Ivan Laptev, Rahul Sukthankar, and Mubarak Shah. The THUMOS challenge on action recognition for videos “in the wild”. Computer Vision and Image Understanding, 155:1-23, 2017. 2, 5
[11] Ahmet Iscen, Giorgos Tolias, Yannis Avrithis, and Ondrej Chum. Label propagation for deep semi-supervised learning. In CVPR, 2019. 2
[12] Mihir Jain, Jan C van Gemert, Thomas Mensink, and Cees GM Snoek. Objects2action: Classifying and localizing actions without any video example. In ICCV, 2015. 2, 4
[13] Dinesh Jayaraman and Kristen Grauman. Slow and steady feature analysis: higher order temporal coherence in video. In CVPR, 2016. 3
[14] Elyor Kodirov, Tao Xiang, Zhenyong Fu, and Shaogang Gong. Unsupervised domain adaptation for zero-shot learning. In ICCV, 2015. 2
[15] Tian Lan, Yuke Zhu, Amir Roshan Zamir, and Silvio Savarese. Action recognition by hierarchical mid-level action elements. In ICCV, 2015. 2
[16] Mans Larsson, Erik Stenborg, Carl Toft, Lars Hammarstrand, Torsten Sattler, and Fredrik Kahl. Fine-grained segmentation networks: Self-supervised segmentation for improved long-term visual localization. In ICCV, 2019. 2
[17] D. Lee. Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks. In ICML, 2013. 2
[18] Tianwei Lin, Xu Zhao, Haisheng Su, Chongjing Wang, and Ming Yang. BSN: Boundary sensitive network for temporal action proposal generation. In ECCV, 2018. 1
[19] Jingen Liu, Benjamin Kuipers, and Silvio Savarese. Recognizing human actions by attributes. In CVPR, 2011. 2
[20] Pascal Mettes and Cees GM Snoek. Spatial-aware object embeddings for zero-shot localization and classification of actions. In ICCV, 2017. 2
[21] Tomas Mikolov, Kai Chen, Greg S. Corrado, and Jeffrey Dean. Efficient estimation of word representations in vector space. In ICLR, 2013. 4
[22] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S. Corrado, and Jeffrey Dean. Distributed representations of words and phrases and their compositionality. In NIPS, 2013. 4
[23] Dmitrijs Milajevs, Dimitri Kartsaklis, Mehrnoosh Sadrzadeh, and Matthew Purver. Evaluating neural word representations in tensor-based compositional settings. In EMNLP, 2014. 4
[24] Mathew Monfort, Alex Andonian, Bolei Zhou, Kandan Ramakrishnan, Sarah Adel Bargal, Yan Yan, Lisa Brown, Quanfu Fan, Dan Gutfreund, and Carl Vondrick. Moments in time dataset: one million videos for event understanding. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2019. 1
[25] Vinod Nair and Geoffrey E Hinton. Rectified linear units improve restricted boltzmann machines. In ICML, 2010. 4
[26] Phuc Nguyen, Ting Liu, Gautam Prasad, and Bohyung Han. Weakly supervised action localization by sparse temporal pooling network. In CVPR, 2018. 2, 7
[27] Phuc Xuan Nguyen, Deva Ramanan, and Charless C Fowlkes. Weakly-supervised action localization with background modeling. In ICCV, 2019. 2, 6, 7
[28] Sujoy Paul, Sourya Roy, and Amit K Roy-Chowdhury. W-TALC: Weakly-supervised temporal activity localization and classification. In ECCV, 2018. 1, 2, 4, 5, 7, 8
[29] S. Reed, H. Lee, D. Anguelov, C. Szegedy, D. Erhan, and A. Rabinovich. Training deep neural networks on noisy labels with bootstrapping. In ICLR, 2015. 2
[30] Alina Roitberg, Manuel Martinez, Monica Haurilet, and Rainer Stiefelhagen. Towards a fair evaluation of zero-shot action recognition using external data. In ECCV, 2018. 2
[31] Zheng Shou, Jonathan Chan, Alireza Zareian, Kazuyuki Miyazawa, and Shih-Fu Chang. CDC: Convolutional-deconvolutional networks for precise temporal action localization in untrimmed videos. In CVPR, 2017. 7
[32] Zheng Shou, Hang Gao, Lei Zhang, Kazuyuki Miyazawa, and Shih-Fu Chang. AutoLoc: Weakly-supervised temporal action localization in untrimmed videos. In ECCV, 2018. 1, 2, 6, 7, 8
[33] Kevin Tang, Li Fei-Fei, and Daphne Koller. Learning latent temporal structure for complex event detection. In CVPR, 2012. 2
[34] Limin Wang, Yuanjun Xiong, Dahua Lin, and Luc Van Gool. Untrimmednets for weakly supervised action recognition and detection. In CVPR, 2017. 1, 2, 8
[35] Laurenz Wiskott and Terrence J Sejnowski. Slow feature analysis: Unsupervised learning of invariances. Neural computation, 14(4):715-770, 2002. 3
[36] Huijuan Xu, Abir Das, and Kate Saenko. R-C3D: Region convolutional 3d network for temporal activity detection. In ICCV, 2017. 7
[37] Xun Xu, Timothy M Hospedales, and Shaogang Gong. Multi-task zero-shot action recognition with prioritised data augmentation. In ECCV, 2016. 2
[38] Serena Yeung, Olga Russakovsky, Ning Jin, Mykhaylo Andriluka, Greg Mori, and Li Fei-Fei. Every moment counts: Dense detailed labeling of actions in complex videos. International Journal of Computer Vision, 126(2-4):375-389, 2018. 2, 5, 8
[39] Tan Yu, Zhou Ren, Yuncheng Li, Enxu Yan, Ning Xu, and Junsong Yuan. Temporal structure mining for weakly supervised action detection. In ICCV, 2019. 2, 7, 8
[40] Yue Zhao, Yuanjun Xiong, Limin Wang, Zhirong Wu, Xiaoou Tang, and Dahua Lin. Temporal action detection with structured segment networks. In ICCV, 2017. 1, 7
[41] Yi Zhu, Yang Long, Yu Guan, Shawn Newsam, and Ling Shao. Towards universal representation for unseen action recognition. In CVPR, 2018. 2
[42] Y. Zou, Z. Yu, X. Liu, B.V.K. V. Kumar, and J. Wang. Confidence regularized self-training. In ICCV, 2019. 2
actionbyteslearningfromtrimmedvideostolocalizeactions/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7db6eca0ddd92888aa6fc022729eb7b75b90fe8b3f6155e0caa9bc4eff9b505f
+size 500620

actionbyteslearningfromtrimmedvideostolocalizeactions/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00911fcee1c1bb51cec96ed0003e3cc6ad4d6f151a54e0f8df279eaf59356885
+size 356405

actiongenomeactionsascompositionsofspatiotemporalscenegraphs/5fd86401-96cc-4db0-97d1-3c9526ebc529_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17428177971a57ba18b9355802ab36f4ea6d6806ade7b329c71750e05c1d2454
+size 83300

actiongenomeactionsascompositionsofspatiotemporalscenegraphs/5fd86401-96cc-4db0-97d1-3c9526ebc529_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:862376b428fbac733b86bf29176063528a40bb76375b833c826538a1fa77f0d4
+size 109644

actiongenomeactionsascompositionsofspatiotemporalscenegraphs/5fd86401-96cc-4db0-97d1-3c9526ebc529_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94bd6c5a7c5e9fb94c3ec661ce4e97b5ca10af443e0ab190e70ac9250361d256
+size 1216898

actiongenomeactionsascompositionsofspatiotemporalscenegraphs/full.md
ADDED
@@ -0,0 +1,288 @@
| 1 |
+
# Action Genome: Actions as Compositions of Spatio-temporal Scene Graphs
|
| 2 |
+
|
| 3 |
+
Jingwei Ji Ranjay Krishna Li Fei-Fei Juan Carlos Niebles Stanford University
|
| 4 |
+
|
| 5 |
+
{jingweij, ranjaykrishna, feifeili, jniebles}@cs.stanford.edu
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Action recognition has typically treated actions and activities as monolithic events that occur in videos. However, there is evidence from Cognitive Science and Neuroscience that people actively encode activities into consistent hierarchical part structures. However, in Computer Vision, few explorations on representations that encode event partonomies have been made. Inspired by evidence that the prototypical unit of an event is an action-object interaction, we introduce Action Genome, a representation that decomposes actions into spatio-temporal scene graphs. Action Genome captures changes between objects and their pairwise relationships while an action occurs. It contains 10K videos with 0.4M objects and 1.7M visual relationships annotated. With Action Genome, we extend an existing action recognition model by incorporating scene graphs as spatiotemporal feature banks to achieve better performance on the Charades dataset. Next, by decomposing and learning the temporal changes in visual relationships that result in an action, we demonstrate the utility of a hierarchical event decomposition by enabling few-shot action recognition, achieving $42.7\%$ mAP using as few as 10 examples. Finally, we benchmark existing scene graph models on the new task of spatio-temporal scene graph prediction.
# 1. Introduction

Video understanding tasks, such as action recognition, have, for the most part, treated actions and activities as monolithic events [8, 38, 66, 87]. Most recently proposed models resort to end-to-end predictions that produce a single label for a long video sequence [10, 23, 31, 69, 72] and do not explicitly decompose events into a series of interactions between objects. On the other hand, image-based structured representations like scene graphs have cascaded improvements across multiple image tasks, including image captioning [2], image retrieval [36, 64], visual question answering [35], relationship modeling [41] and image generation [34]. The scene graph representation, introduced in Visual Genome [43], provides a scaffold that allows vision models to tackle complex inference tasks by breaking scenes into their corresponding objects and visual relationships. However, decompositions for temporal events have not been explored much [50], even though representing events with structured representations could lead to more accurate and grounded action understanding.

Figure 1: We present Action Genome: a representation that decomposes actions into spatio-temporal scene graphs. Inspired by hierarchical bias theory [84] and event segmentation theory [44], Action Genome provides the scaffold to study the dynamics of actions as relationships between people and objects. This decomposition also allows us to improve action recognition, enable few-shot action detection, and introduce spatio-temporal scene graph prediction.
Meanwhile, in Cognitive Science and Neuroscience, it has been postulated that people segment events into consistent groups [5, 6, 55]. Furthermore, people actively encode those ongoing activities in a hierarchical part structure — a phenomenon referred to as the hierarchical bias hypothesis [84] or event segmentation theory [44]. Consider the action of "sitting on a sofa". The person initially starts off next to the sofa, moves in front of it, and finally sits atop it. Such decompositions can enable machines to predict future and past scene graphs with objects and relationships as an action occurs: we can predict that the person is about to sit on the sofa when we see them move in front of it. Similarly, such decomposition can also enable machines to learn from few examples: we can recognize the same action when we see a different person move towards a different chair. While that was a relatively simple decomposition, other events like "playing football", with its multiple rules and actors, can involve multifaceted decompositions. So while such decompositions can provide the scaffolds to improve vision models, how is it possible to correctly create representative hierarchies for a wide variety of complex actions?

Table 1: A comparison of Action Genome with existing video datasets. Built upon Charades [66], Action Genome is the first large-scale video database providing both action labels and spatio-temporal scene graph labels.

<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Video hours</td><td rowspan="2"># videos</td><td rowspan="2"># action categories</td><td colspan="4">Objects</td><td colspan="4">Relationships</td></tr><tr><td>annotated</td><td>localized</td><td># categories</td><td># instances</td><td>annotated</td><td>localized</td><td># categories</td><td># instances</td></tr><tr><td>ActivityNet [8]</td><td>648</td><td>28K</td><td>200</td><td></td><td></td><td>-</td><td>-</td><td></td><td></td><td>-</td><td>-</td></tr><tr><td>HACS Clips [87]</td><td>833</td><td>0.4K</td><td>200</td><td></td><td></td><td>-</td><td>-</td><td></td><td></td><td>-</td><td>-</td></tr><tr><td>Kinetics-700 [9]</td><td>1794</td><td>650K</td><td>700</td><td></td><td></td><td>-</td><td>-</td><td></td><td></td><td>-</td><td>-</td></tr><tr><td>AVA [26]</td><td>108</td><td>504K</td><td>80</td><td></td><td></td><td>-</td><td>-</td><td>✓</td><td></td><td>49</td><td>-</td></tr><tr><td>Charades [66]</td><td>82</td><td>10K</td><td>157</td><td>✓</td><td></td><td>37</td><td>-</td><td></td><td></td><td>-</td><td>-</td></tr><tr><td>EPIC-Kitchen [15]</td><td>55</td><td>-</td><td>125</td><td>✓</td><td></td><td>331</td><td>-</td><td></td><td></td><td>-</td><td>-</td></tr><tr><td>DALY [75]</td><td>31</td><td>8K</td><td>10</td><td>✓</td><td>✓</td><td>41</td><td>3.6K</td><td></td><td></td><td>-</td><td>-</td></tr><tr><td>CAD120++ [91]</td><td>0.57</td><td>0.5K</td><td>10</td><td>✓</td><td>✓</td><td>13</td><td>64K</td><td>✓</td><td>✓</td><td>6</td><td>32K</td></tr><tr><td>Action Genome</td><td>82</td><td>10K</td><td>157</td><td>✓</td><td>✓</td><td>35</td><td>0.4M</td><td>✓</td><td>✓</td><td>25</td><td>1.7M</td></tr></table>

In this paper, we introduce Action Genome, a representation that decomposes actions into spatio-temporal scene graphs. Object detection faced a similar challenge of large variation within any object category. So, just as progress in 2D perception was catalyzed by taxonomies [56], partonomies [57], and ontologies [43, 79], we aim to improve temporal understanding with Action Genome's partonomy. Going back to the example of "person sitting on a sofa", Action Genome breaks down such actions by annotating frames within that action with scene graphs. The graphs capture both the objects, person and sofa, and how their relationships evolve as the action progresses from $\langle$ person - next to - sofa $\rangle$ to $\langle$ person - in front of - sofa $\rangle$ to finally $\langle$ person - sitting on - sofa $\rangle$ . Built upon Charades [66], Action Genome provides 476K object bounding boxes with 1.72M relationships across 234K video frames with 157 action categories.
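To make the decomposition concrete, the sketch below shows one action as a sequence of per-frame $\langle$ subject - predicate - object $\rangle$ triplets. It is illustrative only, not Action Genome's storage format; the variable name and frame comments are hypothetical.

```python
# Illustrative only: "sitting on a sofa" decomposed into one scene graph
# snapshot per sampled frame, following the triplet notation above.
sitting_on_sofa = [
    ("person", "next to", "sofa"),      # approach
    ("person", "in front of", "sofa"),  # transition
    ("person", "sitting on", "sofa"),   # completion
]

for subj, pred, obj in sitting_on_sofa:
    print(f"<{subj} - {pred} - {obj}>")
```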
Most perspectives on action decomposition converge on the prototypical unit of action-object couplets [44, 50, 63, 84]. Action-object couplets refer to transitive actions performed on objects (e.g. "moving a chair" or "throwing a ball") and intransitive self-actions (e.g. "moving towards the sofa"). Action Genome's dynamic scene graph representations capture both types of events and, as such, represent the prototypical unit. With this representation, we enable the study of tasks such as spatio-temporal scene graph prediction — a task where we estimate the decomposition of action dynamics given a video. We can also improve existing tasks like action recognition and few-shot action detection by jointly studying how those actions change visual relationships between objects in scene graphs.

To demonstrate the utility of Action Genome's event decomposition, we introduce a method that extends a state-of-the-art action recognition model [76] by incorporating spatio-temporal scene graphs as feature banks that can be used to predict both the action and the objects and relationships involved. First, we demonstrate that predicting scene graphs can benefit the popular task of action recognition by improving the state-of-the-art on the Charades dataset [66] from $42.5\%$ to $44.3\%$ , and to $60.3\%$ when using oracle scene graphs. Second, we show that the compositional understanding of actions induces better generalization by showcasing few-shot action recognition experiments, achieving $42.7\%$ mAP using as few as 10 training examples. Third, we introduce the task of spatio-temporal scene graph prediction and benchmark existing scene graph models with new evaluation metrics designed specifically for videos. With a better understanding of the dynamics of human-object interactions via spatio-temporal scene graphs, we aim to inspire a new line of research in more decomposable and generalizable action understanding.

# 2. Related work

We derive inspiration from Cognitive Science, compare our representation with static scene graphs, and survey methods in action recognition and few-shot prediction.

Cognitive Science. Early work in Cognitive Science provides evidence for the regularities with which people identify event boundaries [5, 6, 55]. Remarkably, people consistently, both within and between subjects, carve video streams into events, actions, and activities [11, 28, 83]. Such findings hint that it is possible to predict when actions begin and end, and have inspired hundreds of Computer Vision datasets, models, and algorithms to study tasks like action recognition [19, 37, 71, 80, 81, 82]. Subsequent Cognitive and Neuroscience research, using the same paradigm, has also shown that event categories form partonomies [28, 60, 83]. However, Computer Vision has done little work in explicitly representing the hierarchical structures of actions [50], even though understanding event partonomies can improve tasks like action recognition.

Action recognition in videos. Many research projects have tackled the task of action recognition. A major line of work has focused on developing powerful neural architectures to extract useful representations from videos [10, 23, 31, 69, 72]. Pre-trained on large-scale databases for action classification [8, 9], these architectures serve as cornerstones for downstream video tasks and action recognition on other datasets. To support more complex action understanding, a growing body of research explores structural information in videos, including temporal ordering [51, 88], object localization [4, 25, 32, 53, 74, 76], and implicit interactions between objects [4, 53]. Our work contrasts with these methods by explicitly using a structured decomposition of actions into objects and relationships.

Table 1 lists some of the most popular datasets used for action recognition. One major trend in video datasets is to provide a considerably large number of video clips with single action labels [8, 9, 87]. Although these databases have driven the progress of video feature representation for many downstream tasks, the provided annotations treat actions as monolithic events, and do not study how objects and their relationships change during actions/activities. In the meantime, other databases have provided more varieties of annotations: AVA [26] localizes the actors of actions, Charades [66] contains multiple actions happening at the same time, EPIC-Kitchen [15] localizes the interacted objects in ego-centric kitchen videos, and DALY [75] provides object bounding boxes and upper body poses for 10 daily activities. Still, the scene graph, as a comprehensive structural abstraction of images, has not yet been studied in any large-scale video database as a potential representation for action recognition. In this work, we present Action Genome, the first large-scale database to jointly boost research in scene graphs and action understanding. Compared to existing datasets, we provide orders of magnitude more object and relationship labels grounded in actions.

Scene graph prediction. Scene graphs are a formal representation of image information [36, 43] in the form of a graph, which is widely used in knowledge bases [13, 27, 89]. Each scene graph encodes objects as nodes connected together by pairwise relationships as edges. Scene graphs have led to many state-of-the-art models in image captioning [2], image retrieval [36, 64], visual question answering [35], relationship modeling [41], and image generation [34]. Given its versatile utility, the task of scene graph prediction has resulted in a series of publications [14, 30, 43, 46, 48, 49, 59, 77, 78, 85] that have explored reinforcement learning [49], structured prediction [16, 40, 70], utilizing object attributes [20, 61], sequential prediction [59], few-shot prediction [12, 17], and graph-based approaches [47, 77, 78]. However, all of these approaches have restricted their application to static images and have not modeled visual concepts spatio-temporally.

Figure 2: Action Genome's annotation pipeline: For every action, we uniformly sample 5 frames across the action and annotate the person performing the action along with the objects they interact with. We also annotate the pairwise relationships between the person and those objects. Here, we show a video with 4 actions labeled, resulting in 20 $(= 4 \times 5)$ frames annotated with scene graphs. The objects are grounded back in the video as bounding boxes.

Few-shot prediction. The few-shot literature is broadly divided into two main frameworks. The first strategy learns a classifier for a set of frequent categories and then uses them to learn the few-shot categories [21, 22, 58]. For example, ZSL uses attributes of actions to enable few-shot recognition [58]. The second strategy learns invariances or decompositions that enable few-shot classification [7, 18, 39, 90]. OSS and TARN propose similarity or distance measures between video pairs [7, 39], CMN uses a multi-saliency algorithm to encode videos [90], and ProtoGAN creates a prototype vector for each class [18]. Our framework resembles the first strategy because we use the object and visual relationship representations learned from the frequent actions to identify few-shot actions.
# 3. Action Genome

Inspired by Cognitive Science, we decompose events into prototypical action-object units [44, 63, 84]. Each action in Action Genome is represented as changes to objects and their pairwise interactions with the actor/person performing the action. We derive our representation as a temporally changing version of Visual Genome's scene graphs [43]. However, unlike Visual Genome, whose goal was to densely represent a scene with objects and visual relationships, Action Genome's goal is to decompose actions and, as such, focuses on annotating only those segments of the video where the action occurs and only those objects that are involved in the action.

Figure 3: Distribution of (a) relationship and (b) object occurrences. The relationships are color coded to represent attention, spatial, and contact relationships. Most relationships have at least 1k instances and objects have at least 10k instances.

Table 2: There are three types of relationships in Action Genome: attention relationships report which objects people are looking at, spatial relationships indicate how objects are laid out spatially, and contact relationships are semantic relationships involving people manipulating objects.

<table><tr><td>attention</td><td>spatial</td><td colspan="2">contact</td></tr><tr><td>looking at</td><td>in front of</td><td>carrying</td><td>covered by</td></tr><tr><td>not looking at</td><td>behind</td><td>drinking from</td><td>eating</td></tr><tr><td>unsure</td><td>on the side of</td><td>have it on the back</td><td>holding</td></tr><tr><td></td><td>above</td><td>leaning on</td><td>lying on</td></tr><tr><td></td><td>beneath</td><td>not contacting</td><td>sitting on</td></tr><tr><td></td><td>in</td><td>standing on</td><td>touching</td></tr><tr><td></td><td></td><td>twisting</td><td>wearing</td></tr><tr><td></td><td></td><td>wiping</td><td>writing on</td></tr></table>
Annotation framework. Action Genome is built upon the videos and temporal action annotations available in the Charades dataset [66], which contains 157 action classes, 144 of which are human-object activities. In Charades, multiple actions might be occurring at the same time. We do not annotate every single frame in a video; doing so would be redundant, as the changes between objects and relationships occur at longer time scales.

Figure 2 visualizes our annotation pipeline. We uniformly sample 5 frames to annotate across the range of each action interval. With this action-oriented sampling strategy, we provide more labels where more actions occur. For instance, in the example, the actions "sitting on a chair" and "drinking from a cup" occur together and therefore result in more annotated frames, 5 from each action. When annotating each sampled frame, the hired annotators were prompted with action labels and clips of the neighboring video frames for context. The annotators first draw bounding boxes around the objects involved in these actions, then choose the relationship labels from the label set. The clips are used to disambiguate between the objects that are actually involved in an action when multiple instances of a given category are present. For example, if multiple "cups" are present, the context disambiguates which "cup" to annotate for the action "drinking from a cup".
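A minimal sketch of this action-oriented sampling, assuming frame indices delimit an action interval; Action Genome's actual annotation tooling is not shown in this text, so `sample_action_frames` is purely illustrative.

```python
import numpy as np

def sample_action_frames(start_frame: int, end_frame: int, k: int = 5) -> list:
    """Uniformly sample k frame indices across one action interval."""
    # Evenly spaced positions from the start to the end of the action.
    return np.linspace(start_frame, end_frame, num=k).round().astype(int).tolist()

# Two overlapping actions in the same clip each contribute 5 annotated frames.
print(sample_action_frames(120, 480))  # e.g. "sitting on a chair"
print(sample_action_frames(300, 520))  # e.g. "drinking from a cup"
```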
Action Genome contains three different kinds of human-object relationships: attention, spatial and contact relationships (see Table 2). Attention relationships indicate if a person is looking at an object or not, and serve as indicators for which object the person is or will be interacting with. Spatial relationships describe where objects are relative to one another. Contact relationships describe the different ways the person is contacting an object. A change in contact often indicates the occurrence of an action: for example, changing from $\langle$ person - not contacting - book $\rangle$ to $\langle$ person - holding - book $\rangle$ may show an action of "picking up a book".

It is worth noting that while Charades provides an injective mapping from each action to a verb, it is different from the relationship labels we provide. Charades' verbs are clip-level labels, such as "awaken", while we decompose them into frame-level human-object relationships, such as a sequence of $\langle$ person - lying on - bed $\rangle$ , $\langle$ person - sitting on - bed $\rangle$ and $\langle$ person - not contacting - bed $\rangle$ .

Database statistics. Action Genome provides frame-level scene graph labels for the components of each action. Overall, we provide annotations for 234,253 frames with a total of 476,229 bounding boxes of 35 object classes (excluding "person"), and 1,715,568 instances of 25 relationship classes. Figure 3 visualizes the log-distribution of object and relationship categories in the dataset. Like most concepts in vision, some objects (e.g. table and chair) and relationships (e.g. in front of and not looking at) occur frequently while others (e.g. twisting and doorknob) occur only a handful of times. However, even with such a distribution, almost all objects have at least 10K instances and every relationship has at least 1K instances.

Figure 4: A weighted bipartite mapping between objects and relationships shows that they are densely interconnected in Action Genome. The weights represent the percentage of occurrences in which a specific object occurs in a relationship. The three colors in the graph represent the three kinds of relationships: attention (in orange), spatial (in green) and contact (in purple).

Additionally, Figure 4 visualizes how frequently objects occur in which relationships. We see that most objects are relatively evenly involved in all three types of relationships. Unlike Visual Genome, where dataset bias provides a strong baseline for predicting relationships given the object categories, Action Genome does not suffer the same bias.
# 4. Method

We validate the utility of Action Genome's action decomposition by studying the effect of combining learning spatio-temporal scene graphs with learning to recognize actions. We propose a method, named Scene Graph Feature Banks (SGFB), to incorporate spatio-temporal scene graphs into action recognition. Our method is inspired by recent work in computer vision that uses information "banks" [1, 45, 76]. Information banks are feature representations that have been used to represent, for example, the object categories that occur in a video [45], or even where the objects are [1]. Our model is most directly related to the recent long-term feature banks [76], which accumulate features of a long video as a fixed-size representation for action recognition.

Overall, our SGFB model contains two components: the first component generates spatio-temporal scene graphs while the second component encodes the graphs to predict action labels. Given a video sequence $v = \{i_1, i_2, \dots, i_N\}$ , the aim of traditional multi-class action recognition is to assign multiple action labels to this video. Here, $v$ represents the video sequence made up of image frames $i_j, \forall j \in [1, N]$ . SGFB generates a spatio-temporal scene graph for every frame in the given video sequence. The scene graphs are encoded to formulate a spatio-temporal scene graph feature bank for the final task of action recognition. We describe the scene graph prediction and the scene graph feature bank components in more detail below. See Figure 5 for a high-level visualization of the model's forward pass.

Figure 5: Overview of our proposed model, SGFB, for action recognition using spatio-temporal scene graphs. SGFB predicts scene graphs for every frame in a video. These scene graphs are converted into feature representations that are then combined using methods similar to long-term feature banks [76]. The final representation is merged with 3D CNN features and used to predict action labels.
# 4.1. Scene graph prediction

Previous research has proposed plenty of methods for predicting scene graphs on static images [48, 52, 77, 78, 85, 86]. We employ a state-of-the-art scene graph predictor as the first step of our method. Given a video sequence $v$ , the scene graph predictor $SG$ generates all the objects and connects each object to the actor through their relationships in each frame, i.e. $SG: i_j \to G_j$ . On each frame, the scene graph $G = (O, R)$ consists of a set of objects $O = \{o_1, o_2, \ldots\}$ that a person is interacting with and a set of relationships $R = \{\{r_{11}, r_{12}, \ldots\}, \{r_{21}, r_{22}, \ldots\}, \ldots\}$ . Here $r_{pq}$ denotes the $q$ -th relationship between the person and the object $o_p$ . Note that there can be multiple relationships between the person and each object, including attention, spatial, and contact relationships. Besides the graph labels, the scene graph predictor $SG$ also outputs confidence scores for all predicted objects, $\{s_{o_1}, s_{o_2}, \ldots\}$ , and relationships, $\{\{s_{r_{11}}, s_{r_{12}}, \ldots\}, \{s_{r_{21}}, s_{r_{22}}, \ldots\}, \ldots\}$ . We have experimented with various choices of $SG$ and benchmark their performance on Action Genome in Section 5.3.
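As a concrete illustration of these per-frame outputs, one could store them in a container like the sketch below. This is a hypothetical data structure, not the authors' implementation; all field names are assumptions.

```python
from dataclasses import dataclass, field
from typing import List, Tuple

@dataclass
class FrameSceneGraph:
    """Hypothetical container for SG's per-frame output.

    objects[p] is object o_p with confidence object_scores[p] (s_{o_p});
    relationships[p] lists (label, score) pairs (r_{pq}, s_{r_{pq}}) between
    the person and o_p, spanning attention, spatial, and contact relationships.
    """
    objects: List[str] = field(default_factory=list)
    boxes: List[Tuple[float, float, float, float]] = field(default_factory=list)
    object_scores: List[float] = field(default_factory=list)
    relationships: List[List[Tuple[str, float]]] = field(default_factory=list)

g = FrameSceneGraph(
    objects=["sofa"],
    boxes=[(0.10, 0.40, 0.90, 0.95)],
    object_scores=[0.92],
    relationships=[[("looking at", 0.88), ("in front of", 0.75), ("sitting on", 0.64)]],
)
```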
# 4.2. Scene graph feature banks

After obtaining the scene graph $G$ on each frame, we formulate a feature vector $f$ by aggregating the information across all the scene graphs into a feature bank. Assume there are $|O|$ classes of objects and $|R|$ classes of relationships; in Action Genome, $|O| = 35$ and $|R| = 25$ . We first construct a confidence matrix $C$ of dimension $|O| \times |R|$ , where each entry corresponds to an object-relationship category pair. We compute every entry of this matrix from the scores output by the scene graph predictor $SG$ : $C_{ij} = s_{o_i} \times s_{r_{ij}}$ . Intuitively, $C_{ij}$ is high when $SG$ is confident that object $o_i$ is in the current frame and that its relationship with the actor is $r_{ij}$ . We flatten the confidence matrix into the feature vector $f$ for each frame.
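The confidence matrix translates directly into code. A minimal sketch, assuming the hypothetical `FrameSceneGraph` container from Section 4.1 and category-to-index mappings `obj_to_idx` / `rel_to_idx`:

```python
import numpy as np

def scene_graph_feature(graph, obj_to_idx, rel_to_idx, num_obj=35, num_rel=25):
    """Build the |O| x |R| confidence matrix C and flatten it into f.

    C[i, j] = s_{o_i} * s_{r_ij}: high when SG is confident that object o_i
    is present and holds relationship r_ij with the actor.
    """
    C = np.zeros((num_obj, num_rel), dtype=np.float32)
    for obj, s_o, rels in zip(graph.objects, graph.object_scores, graph.relationships):
        for rel, s_r in rels:
            C[obj_to_idx[obj], rel_to_idx[rel]] = s_o * s_r
    return C.flatten()  # feature vector f of length 35 * 25 = 875
```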
Formally, $F_{SG} = [f_1, f_2, \dots, f_T]$ is a sequence of scene graph features extracted from a subsample of the frames $i_1, i_2, \dots, i_N$ . We aggregate the features across frames using methods similar to long-term feature banks [76]: $F_{SG}$ is combined with 3D CNN features $S$ extracted from a short-term clip using feature bank operators (FBO), which can be instantiated as mean/max pooling or non-local blocks [73]. The 3D CNN embeds short-term information into $S$ , while $F_{SG}$ provides contextual information that is critical in modeling the dynamics of complex actions with long time spans. The final aggregated feature is then used to predict action labels for the video.
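A sketch of this aggregation step with only the pooling FBOs shown; the non-local variant [73] and the final classifier are omitted, and `f_list` / `S` are assumed inputs (the per-frame vectors $f$ and the short-term 3D CNN feature).

```python
import numpy as np

def sgfb_feature(f_list, S, fbo="mean"):
    """Combine the scene graph feature bank F_SG = [f_1, ..., f_T]
    with the short-term 3D CNN feature S using a pooling FBO."""
    F_SG = np.stack(f_list)  # shape (T, 875): one flattened confidence matrix per frame
    pooled = F_SG.mean(axis=0) if fbo == "mean" else F_SG.max(axis=0)
    # The merged vector is what the action classifier consumes.
    return np.concatenate([S, pooled])
```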
# 5. Experiments

Action Genome's representation enables us to study few-shot action recognition by decomposing actions into temporally changing visual relationships between objects. It also allows us to benchmark whether understanding the decomposition helps improve performance on action recognition or scene graph prediction individually. To study these benefits afforded by Action Genome, we design three experiments: action recognition, few-shot action recognition, and finally, spatio-temporal scene graph prediction.

# 5.1. Action recognition on Charades

We expect that grounding the components that compose an action — the objects and their relationships — will improve our ability to predict which actions are occurring in a video sequence. So, we evaluate the utility of Action Genome's scene graphs on the task of action recognition.

Problem formulation. We specifically study multi-class action recognition on the Charades dataset [66]. The Charades dataset contains 9,848 crowdsourced videos with an average length of 30 seconds. At any frame, a person can perform multiple actions out of a nomenclature of 157 classes. The multi-classification task provides a video sequence as input and expects multiple action labels as output. We train our SGFB model to predict Charades action labels; during training, we provide SGFB with spatio-temporal scene graphs as additional supervision.

Table 3: Action recognition on the Charades validation set in mAP (%). We outperform all existing methods when we simultaneously predict scene graphs while performing action recognition. We also find that utilizing ground truth scene graphs can significantly boost performance.

<table><tr><td>Method</td><td>Backbone</td><td>Pre-train</td><td>mAP</td></tr><tr><td>I3D + NL [10, 73]</td><td>R101-I3D-NL</td><td>Kinetics-400</td><td>37.5</td></tr><tr><td>STRG [74]</td><td>R101-I3D-NL</td><td>Kinetics-400</td><td>39.7</td></tr><tr><td>Timeception [31]</td><td>R101</td><td>Kinetics-400</td><td>41.1</td></tr><tr><td>SlowFast [23]</td><td>R101</td><td>Kinetics-400</td><td>42.1</td></tr><tr><td>SlowFast+NL [23, 73]</td><td>R101-NL</td><td>Kinetics-400</td><td>42.5</td></tr><tr><td>LFB [76]</td><td>R101-I3D-NL</td><td>Kinetics-400</td><td>42.5</td></tr><tr><td>SGFB (ours)</td><td>R101-I3D-NL</td><td>Kinetics-400</td><td>44.3</td></tr><tr><td>SGFB Oracle (ours)</td><td>R101-I3D-NL</td><td>Kinetics-400</td><td>60.3</td></tr></table>
Baselines. Previous work has proposed methods for multi-class action recognition and benchmarked them on Charades. Recent state-of-the-art methods include I3D [10] with non-local blocks [73] as video feature extractors (I3D+NL), spatio-temporal region graphs (STRG) [74], Timeception convolutional layers (Timeception) [31], SlowFast networks (SlowFast) [23], and long-term feature banks (LFB) [76]. All the baseline methods are pre-trained on Kinetics-400 [38] and take RGB as the input modality.

Implementation details. SGFB first predicts a scene graph on each frame, then constructs a spatio-temporal scene graph feature bank for action recognition. We use Faster R-CNN [62] with a ResNet-101 [29] backbone for region proposals and object detection. We leverage RelDN [86] to predict the visual relationships. Scene graph prediction is trained on Action Genome, where we follow the same train/val splits of videos as the Charades dataset. Action recognition uses the same video feature extractor, hyper-parameters, and solver schedulers as long-term feature banks (LFB) [76] for a fair comparison.

Results. We report the performance of all models using mean average precision (mAP) on the Charades validation set in Table 3. By replacing the feature banks with spatio-temporal scene graph features, we outperform the state-of-the-art LFB by $1.8\%$ mAP. Our features are smaller ($35 \times 25 = 875$ dimensions in SGFB versus 2048 in LFB) yet concisely capture the information relevant for recognizing actions.

We also find that improving object detectors designed for videos can further improve action recognition results. To quantitatively demonstrate the potential of better scene graphs for action recognition, we designed an SGFB Oracle setup. The SGFB Oracle assumes that a perfect scene graph prediction method is available: the spatio-temporal scene graph feature bank directly encodes a feature vector from ground truth objects and visual relationships for the annotated frames. Feeding such feature banks into the SGFB model, we observe a significant improvement in action recognition: a $16\%$ increase in mAP. Such a boost in performance shows the potential of Action Genome and compositional action understanding when video-based scene graph models are utilized to improve scene graph prediction. It is important to note that the performance of the SGFB Oracle is not an upper bound, since we only utilize ground truth scene graphs for the few frames that have ground truth annotations.

Table 4: Few-shot experiments. With compositional action understanding, our SGFB demonstrates better generalizability than LFB. The SGFB Oracle shows how much the scene graph representation could benefit action recognition.

<table><tr><td></td><td>1-shot</td><td>5-shot</td><td>10-shot</td></tr><tr><td>LFB [76]</td><td>28.3</td><td>36.3</td><td>39.6</td></tr><tr><td>SGFB (ours)</td><td>28.8</td><td>37.9</td><td>42.7</td></tr><tr><td>SGFB Oracle (ours)</td><td>30.4</td><td>40.2</td><td>50.5</td></tr></table>
# 5.2. Few-shot action recognition

Intuitively, predicting actions should be easier from a symbolic embedding of scene graphs than from pixels. When trained with very few examples, compositional action understanding with additional knowledge of scene graphs should outperform methods that treat actions as monolithic concepts. We showcase the capability and potential of spatio-temporal scene graphs to generalize to rare actions.

Problem formulation. In our few-shot action recognition experiments on Charades, we split the 157 action classes into a base set of 137 classes and a novel set of 20 classes. We first train a backbone feature extractor (R101-I3D-NL) on all video examples of the base classes; this backbone is shared by the baseline LFB, our SGFB, and the SGFB Oracle. Next, we train each model with only $k$ examples from each novel class, where $k = 1, 5, 10$ , for 50 epochs. Finally, we evaluate the trained models on all examples of the novel classes in the Charades validation set.
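A minimal sketch of constructing the $k$ -shot training set for the novel classes; the `videos_by_class` data format is a hypothetical assumption, and the base classes are excluded because they are used beforehand to train the shared backbone.

```python
import random

def build_k_shot_set(videos_by_class, novel_classes, k):
    """Keep only k training examples per novel class (k = 1, 5, or 10).

    `videos_by_class` maps a class name to a list of video ids.
    """
    rng = random.Random(0)  # fixed seed so the episode is reproducible
    return {cls: rng.sample(videos_by_class[cls], k) for cls in novel_classes}
```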
Results. We report few-shot experiment performance in Table 4. SGFB achieves better performance than LFB on all 1-, 5-, and 10-shot experiments. Furthermore, with ground truth scene graphs, the SGFB Oracle shows a $10.9\%$ 10-shot mAP improvement. We visualize the comparison between SGFB and LFB in Figure 6. With the knowledge of spatio-temporal scene graphs, SGFB better captures action concepts involving the dynamics of objects and relationships.

Ground truth: Awakening in bed, Lying on a bed, Snuggling with a pillow

Figure 6: Qualitative results of 10-shot experiments. We compare the predictions of our SGFB against LFB [76]. Since SGFB uses scene graph knowledge and explicitly captures the dynamics of human-object relationships, it easily learns the concept of "awakening in bed" even when trained with only 10 examples of this label. Also, since SGFB is trained to detect and ground objects, it avoids misclassifying objects, such as television, which results in more robust action recognition.
# 5.3. Spatio-temporal scene graph prediction

Progress in image-based scene graph prediction has cascaded to improvements across multiple Computer Vision tasks, including image captioning [2], image retrieval [36, 64], visual question answering [35], relationship modeling [41] and image generation [34]. In order to promote similar progress in video-based tasks, we introduce the complementary task of spatio-temporal scene graph prediction. Unlike image-based scene graph prediction, which only has a single image as input, this task expects a video as input and can therefore utilize temporal information from neighboring frames to strengthen its predictions. In this section, we define the task and its evaluation metrics, and report benchmark results from numerous recently proposed image-based scene graph models applied to this new task.

Problem formulation. The task expects as input a video sequence $v = \{i_1, i_2, \ldots, i_n\}$ where $i_j, \forall j \in [1, n]$ represents image frames from the video. The task requires the model to generate a spatio-temporal scene graph $G = (O, R)$ per frame. Each $o_k \in O$ is an object with a category label and bounding box location, and each $r_{kl} \in R$ represents a relationship between objects $o_k$ and $o_l$ .

Table 5: We evaluate recently proposed image-based scene graph prediction models and provide a benchmark for the new task of spatio-temporal scene graph prediction. We find that there is significant room for improvement, especially since these existing methods were designed to be conditioned on a single frame and do not consider the entire video sequence as a whole.

<table><tr><td rowspan="3">Method</td><td colspan="4">PredCls</td><td colspan="4">SGCls</td><td colspan="4">SGDet</td></tr><tr><td colspan="2">image</td><td colspan="2">video</td><td colspan="2">image</td><td colspan="2">video</td><td colspan="2">image</td><td colspan="2">video</td></tr><tr><td>R@20</td><td>R@50</td><td>R@20</td><td>R@50</td><td>R@20</td><td>R@50</td><td>R@20</td><td>R@50</td><td>R@20</td><td>R@50</td><td>R@20</td><td>R@50</td></tr><tr><td>VRD [52]</td><td>14.75</td><td>14.85</td><td>14.51</td><td>14.60</td><td>13.65</td><td>14.69</td><td>13.41</td><td>14.44</td><td>10.28</td><td>10.94</td><td>10.04</td><td>10.70</td></tr><tr><td>Freq Prior [85]</td><td>32.70</td><td>32.84</td><td>32.25</td><td>32.37</td><td>31.52</td><td>32.78</td><td>31.08</td><td>32.32</td><td>24.03</td><td>24.87</td><td>23.49</td><td>24.31</td></tr><tr><td>IMP [77]</td><td>35.15</td><td>35.56</td><td>34.50</td><td>34.86</td><td>31.73</td><td>34.85</td><td>31.09</td><td>34.16</td><td>23.88</td><td>25.52</td><td>23.23</td><td>24.82</td></tr><tr><td>MSDN [48]</td><td>35.27</td><td>35.64</td><td>34.61</td><td>34.93</td><td>31.89</td><td>34.98</td><td>31.28</td><td>34.28</td><td>24.00</td><td>25.64</td><td>23.39</td><td>24.95</td></tr><tr><td>Graph R-CNN [78]</td><td>35.36</td><td>35.74</td><td>34.80</td><td>35.12</td><td>31.94</td><td>35.07</td><td>31.43</td><td>34.46</td><td>24.12</td><td>25.77</td><td>23.59</td><td>25.15</td></tr><tr><td>RelDN [86]</td><td>35.89</td><td>36.09</td><td>35.36</td><td>35.51</td><td>33.47</td><td>35.84</td><td>32.96</td><td>35.27</td><td>25.00</td><td>26.21</td><td>24.45</td><td>25.63</td></tr></table>

Evaluation metrics. We borrow the three standard evaluation modes for image-based scene graph prediction [52]: (i) scene graph detection (SGDET), which expects input images and predicts bounding box locations, object categories, and predicate labels; (ii) scene graph classification (SGCLS), which expects ground truth boxes and predicts object categories and predicate labels; and (iii) predicate classification (PREDCLS), which expects ground truth bounding boxes and object categories and predicts predicate labels. We refer the reader to the paper that introduced these tasks for more details [52]. We adapt these metrics for video: the per-frame measurements are first averaged within each video as the measurement for that video, and we then average video results as the final result for the test set.
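A sketch of this video-adapted averaging, assuming frame-level Recall@K values have already been computed; the input format is a hypothetical list of lists.

```python
import numpy as np

def video_level_recall(per_frame_recalls_by_video):
    """Average frame-level Recall@K within each video, then across videos.

    `per_frame_recalls_by_video` holds one inner list of frame-level
    Recall@K values per test video.
    """
    per_video = [float(np.mean(frames)) for frames in per_frame_recalls_by_video]
    return float(np.mean(per_video))

# Averaging per video first keeps long videos from dominating the metric.
print(video_level_recall([[0.30, 0.50], [0.20, 0.40, 0.60]]))  # 0.4
```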
Baselines. We benchmark the following image-based scene graph models on the spatio-temporal scene graph prediction task: VRD's visual module (VRD) [52], neural motifs' frequency prior (Freq-prior) [85], iterative message passing (IMP) [77], the multi-level scene description network (MSDN) [48], graph R-CNN (Graph R-CNN) [78], and the relationship detection network (RelDN) [86].

Results. To our surprise, we find that IMP, one of the earliest scene graph prediction models, actually outperforms numerous more recently proposed methods. The most recently proposed scene graph model, RelDN, marginally outperforms IMP, suggesting that modeling similarities between object and relationship classes improves performance on our task as well. The small gap in performance between PredCls and SGCls suggests that these models suffer from not being able to accurately detect the objects in the video frames; object detectors designed specifically for videos could further improve performance. The models were trained only on Action Genome's data and were not finetuned on Visual Genome [43], which contains image-based scene graphs, or on ActivityNet Captions [42], which contains dense captioning of actions in videos with natural language paragraphs. We expect that finetuning models with such datasets would yield further improvements.
# 6. Future work

With its rich hierarchy of events, Action Genome not only enables research on spatio-temporal scene graph prediction and compositional action recognition, but also opens up various research directions. We hope future work will develop methods for the following:

Spatio-temporal action localization. The majority of spatio-temporal action localization methods [24, 25, 33, 68] focus on localizing the person performing the action but ignore the objects the person interacts with, even though they are also involved in the action. Action Genome can enable research on the localization of both actors and objects, formulating a more comprehensive grounded action localization task. Furthermore, other variants of this task can also be explored; for example, a weakly-supervised localization task where a model is trained with only action labels but tasked with localizing the actors and objects.

Explainable action models. Explainable visual models are an emerging field of research. Amongst numerous techniques, saliency prediction has emerged as a key mechanism to interpret machine learning models [54, 65, 67]. Action Genome provides frame-level labels of attention in the form of objects that a person performing the action is either looking at or interacting with. These labels can be used to further train explainable models.

Video generation from spatio-temporal scene graphs. Recent studies have explored image generation from scene graphs [3, 34]. Similarly, with a structured video representation, Action Genome enables research on video generation from spatio-temporal scene graphs.

# 7. Conclusion

We introduce Action Genome, a representation that decomposes actions into spatio-temporal scene graphs. Scene graphs explain how objects and their relationships change as an action occurs. We demonstrated the utility of Action Genome by collecting a large dataset of spatio-temporal scene graphs and using it to improve state-of-the-art results for action recognition as well as few-shot action recognition. Finally, we benchmarked results for the new task of spatio-temporal scene graph prediction. We hope that Action Genome will inspire a new line of research in more decomposable and generalizable video understanding.

Acknowledgement. We would like to thank Panasonic for their support.
# References

[1] Tim Althoff, Hyun Oh Song, and Trevor Darrell. Detection bank: an object detection based video representation for multimedia event recognition. In Proceedings of the 20th ACM international conference on Multimedia, pages 1065-1068. ACM, 2012. 5

[2] Peter Anderson, Basura Fernando, Mark Johnson, and Stephen Gould. Spice: Semantic propositional image caption evaluation. In European Conference on Computer Vision, pages 382-398. Springer, 2016. 1, 3, 7

[3] Oron Ashual and Lior Wolf. Specifying object attributes and relations in interactive scene generation. In Proceedings of the IEEE International Conference on Computer Vision, pages 4561-4569, 2019. 8

[4] Fabien Baradel, Natalia Neverova, Christian Wolf, Julien Mille, and Greg Mori. Object level visual reasoning in videos. In Proceedings of the European Conference on Computer Vision (ECCV), pages 105-121, 2018. 3

[5] Roger G Barker and Herbert F Wright. One boy's day; a specimen record of behavior. 1951. 1, 2

[6] Roger G Barker and Herbert F Wright. Midwest and its children: The psychological ecology of an American town. 1955. 1, 2

[7] Mina Bishay, Georgios Zoumpourlis, and Ioannis Patras. Tarn: Temporal attentive relation network for few-shot and zero-shot action recognition. arXiv preprint arXiv:1907.09021, 2019. 3

[8] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 961-970, 2015. 1, 2, 3

[9] Joao Carreira, Eric Noland, Chloe Hillier, and Andrew Zisserman. A short note on the kinetics-700 human action dataset. arXiv preprint arXiv:1907.06987, 2019. 2, 3

[10] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 1, 3, 6

[11] Roberto Casati and A Varzi. Events, volume 15 of the international research library of philosophy, 1996. 2

[12] Vincent S Chen, Paroma Varma, Ranjay Krishna, Michael Bernstein, Christopher Re, and Li Fei-Fei. Scene graph prediction with limited labels. arXiv preprint arXiv:1904.11622, 2019. 3

[13] Aron Culotta and Jeffrey Sorensen. Dependency tree kernels for relation extraction. In Proceedings of the 42nd annual meeting on association for computational linguistics, page 423. Association for Computational Linguistics, 2004. 3

[14] Bo Dai, Yuqi Zhang, and Dahua Lin. Detecting visual relationships with deep relational networks. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 3298-3308. IEEE, 2017. 3

[15] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, et al. Scaling egocentric vision: The epic-kitchens dataset. In Proceedings of the European Conference on Computer Vision (ECCV), pages 720-736, 2018. 2, 3

[16] Chaitanya Desai, Deva Ramanan, and Charless C Fowlkes. Discriminative models for multi-class object layout. International journal of computer vision, 95(1):1-12, 2011. 3

[17] Apoorva Dornadula, Austin Narcomey, Ranjay Krishna, Michael Bernstein, and Li Fei-Fei. Visual relationships as functions: Enabling few-shot scene graph prediction. arXiv preprint arXiv:1906.04876, 2019. 3

[18] Sai Kumar Dwivedi, Vikram Gupta, Rahul Mitra, Shuaib Ahmed, and Arjun Jain. ProtoGAN: Towards few shot learning for action recognition. arXiv preprint arXiv:1909.07945, 2019. 3

[19] Victor Escorcia, Fabian Caba Heilbron, Juan Carlos Niebles, and Bernard Ghanem. Daps: Deep action proposals for action understanding. In European Conference on Computer Vision, pages 768-784. Springer, 2016. 2

[20] Ali Farhadi, Ian Endres, Derek Hoiem, and David Forsyth. Describing objects by their attributes. In Computer Vision and Pattern Recognition, 2009. CVPR 2009. IEEE Conference on, pages 1778-1785. IEEE, 2009. 3

[21] Li Fei-Fei, Rob Fergus, and Pietro Perona. A bayesian approach to unsupervised one-shot learning of object categories. In Proceedings Ninth IEEE International Conference on Computer Vision, pages 1134-1141. IEEE, 2003. 3

[22] Li Fei-Fei, Rob Fergus, and Pietro Perona. One-shot learning of object categories. IEEE transactions on pattern analysis and machine intelligence, 28(4):594-611, 2006. 3

[23] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In Proceedings of the IEEE International Conference on Computer Vision, pages 6202-6211, 2019. 1, 3, 6

[24] Rohit Girdhar, João Carreira, Carl Doersch, and Andrew Zisserman. A better baseline for ava. arXiv preprint arXiv:1807.10066, 2018. 8

[25] Rohit Girdhar, Joao Carreira, Carl Doersch, and Andrew Zisserman. Video action transformer network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 244-253, 2019. 3, 8

[26] Chunhui Gu, Chen Sun, David A Ross, Carl Vondrick, Caroline Pantofaru, Yeqing Li, Sudheendra Vijayanarasimhan, George Toderici, Susanna Ricco, Rahul Sukthankar, et al. Ava: A video dataset of spatio-temporally localized atomic visual actions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6047-6056, 2018. 2, 3

[27] Zhou GuoDong, Su Jian, Zhang Jie, and Zhang Min. Exploring various knowledge in relation extraction. In Proceedings of the 43rd annual meeting on association for computational linguistics, pages 427-434. Association for Computational Linguistics, 2005. 3

[28] Bridgette M Hard, Barbara Tversky, and David S Lang. Making sense of abstract events: Building event schemas. Memory & cognition, 34(6):1221-1235, 2006. 2

[29] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6

[30] Roei Herzig, Moshiko Raboh, Gal Chechik, Jonathan Berant, and Amir Globerson. Mapping images to scene graphs with permutation-invariant structured prediction. In Advances in Neural Information Processing Systems, pages 7211-7221, 2018. 3

[31] Noureldien Hussein, Efstratios Gavves, and Arnold WM Smeulders. Timeception for complex action recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 254-263, 2019. 1, 3, 6

[32] Ashesh Jain, Amir R Zamir, Silvio Savarese, and Ashutosh Saxena. Structural-rnn: Deep learning on spatio-temporal graphs. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5308-5317, 2016. 3

[33] Jianwen Jiang, Yu Cao, Lin Song, Shiwei Zhang, Yunkai Li, Ziyao Xu, Qian Wu, Chuang Gan, Chi Zhang, and Gang Yu. Human centric spatio-temporal action localization. In ActivityNet Workshop on CVPR, 2018. 8

[34] Justin Johnson, Agrim Gupta, and Li Fei-Fei. Image generation from scene graphs. arXiv preprint arXiv:1804.01622, 2018. 1, 3, 7, 8

[35] Justin Johnson, Bharath Hariharan, Laurens van der Maaten, Judy Hoffman, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Inferring and executing programs for visual reasoning. arXiv preprint arXiv:1705.03633, 2017. 1, 3, 7

[36] Justin Johnson, Ranjay Krishna, Michael Stark, Li-Jia Li, David Shamma, Michael Bernstein, and Li Fei-Fei. Image retrieval using scene graphs. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3668-3678, 2015. 1, 3, 7

[37] Andrej Karpathy, George Toderici, Sanketh Shetty, Thomas Leung, Rahul Sukthankar, and Li Fei-Fei. Large-scale video classification with convolutional neural networks. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 1725-1732, 2014. 2

[38] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017. 1, 6

[39] Orit Kliper-Gross, Tal Hassner, and Lior Wolf. One shot similarity metric learning for action recognition. In International Workshop on Similarity-Based Pattern Recognition, pages 31-45. Springer, 2011. 3

[40] Philipp Krahenbuhl and Vladlen Koltun. Efficient inference in fully connected crfs with gaussian edge potentials. In Advances in neural information processing systems, pages 109-117, 2011. 3

[41] Ranjay Krishna, Ines Chami, Michael Bernstein, and Li Fei-Fei. Referring relationships. In Computer Vision and Pattern Recognition, 2018. 1, 3, 7

[42] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pages 706-715, 2017. 8

[43] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, Michael Bernstein, and Li Fei-Fei. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International Journal of Computer Vision, 123(1):32-73, 2017. 1, 2, 3, 8

[44] Christopher A Kurby and Jeffrey M Zacks. Segmentation in the perception and memory of events. Trends in cognitive sciences, 12(2):72-79, 2008. 1, 2, 3

[45] Li-Jia Li, Hao Su, Li Fei-Fei, and Eric P Xing. Object bank: A high-level image representation for scene classification & semantic feature sparsification. In Advances in neural information processing systems, pages 1378-1386, 2010. 5

[46] Yikang Li, Wanli Ouyang, Xiaogang Wang, and Xiaoou Tang. Vip-cnn: Visual phrase guided convolutional neural network. In Computer Vision and Pattern Recognition (CVPR), 2017 IEEE Conference on, pages 7244-7253. IEEE, 2017. 3

[47] Yikang Li, Wanli Ouyang, Bolei Zhou, Jianping Shi, Chao Zhang, and Xiaogang Wang. Factorizable net: an efficient subgraph-based framework for scene graph generation. In European Conference on Computer Vision, pages 346-363. Springer, 2018. 3

[48] Yikang Li, Wanli Ouyang, Bolei Zhou, Kun Wang, and Xiaogang Wang. Scene graph generation from objects, phrases and region captions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1261-1270, 2017. 3, 5, 8

[49] Xiaodan Liang, Lisa Lee, and Eric P Xing. Deep variation-structured reinforcement learning for visual relationship and attribute detection. In Computer Vision and Pattern Recognition (CVPR), 2017 IEEE Conference on, pages 4408-4417. IEEE, 2017. 3

[50] Ivan Lillo, Alvaro Soto, and Juan Carlos Niebles. Discriminative hierarchical modeling of spatio-temporally composable human activities. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 812-819, 2014. 1, 2

[51] Ji Lin, Chuang Gan, and Song Han. Tsm: Temporal shift module for efficient video understanding. In Proceedings of the IEEE International Conference on Computer Vision, pages 7083-7093, 2019. 3

[52] Cewu Lu, Ranjay Krishna, Michael Bernstein, and Li Fei-Fei. Visual relationship detection with language priors. In European Conference on Computer Vision, pages 852-869. Springer, 2016. 5, 7, 8

[53] Chih-Yao Ma, Asim Kadav, Iain Melvin, Zsolt Kira, Ghassan AlRegib, and Hans Peter Graf. Attend and interact: Higher-order object interactions for video understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6790-6800, 2018. 3

[54] Aravindh Mahendran and Andrea Vedaldi. Salient deconvolutional networks. In European Conference on Computer Vision, pages 120-135. Springer, 2016. 8

[55] Albert Michotte. The perception of causality. Routledge, 1963. 1, 2

[56] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41, 1995. 2

[57] George A Miller and Philip N Johnson-Laird. Language and perception. Belknap Press, 1976. 2

[58] Ashish Mishra, Vinay Kumar Verma, M Shiva Krishna Reddy, S Arulkumar, Piyush Rai, and Anurag Mittal. A generative approach to zero-shot and few-shot action recognition. In 2018 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 372-380. IEEE, 2018. 3

[59] Alejandro Newell and Jia Deng. Pixels to graphs by associative embedding. In Advances in Neural Information Processing Systems, pages 2168-2177, 2017. 3

[60] Darren Newtson. Attribution and the unit of perception of ongoing behavior. Journal of Personality and Social Psychology, 28(1):28, 1973. 2

[61] Devi Parikh and Kristen Grauman. Relative attributes. In Computer Vision (ICCV), 2011 IEEE International Conference on, pages 503-510. IEEE, 2011. 3

[62] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems, pages 91-99, 2015. 6

[63] Jeremy R Reynolds, Jeffrey M Zacks, and Todd S Braver. A computational model of event segmentation from perceptual prediction. Cognitive science, 31(4):613-643, 2007. 2, 3

[64] Sebastian Schuster, Ranjay Krishna, Angel Chang, Li Fei-Fei, and Christopher D Manning. Generating semantically precise scene graphs from textual descriptions for improved image retrieval. In Proceedings of the fourth workshop on vision and language, pages 70-80, 2015. 1, 3, 7

[65] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE International Conference on Computer Vision, pages 618-626, 2017. 8

[66] Gunnar A Sigurdsson, Gül Varol, Xiaolong Wang, Ali Farhadi, Ivan Laptev, and Abhinav Gupta. Hollywood in homes: Crowdsourcing data collection for activity understanding. In European Conference on Computer Vision, pages 510-526. Springer, 2016. 1, 2, 3, 4, 6

[67] Karen Simonyan, Andrea Vedaldi, and Andrew Zisserman. Deep inside convolutional networks: Visualising image classification models and saliency maps. arXiv preprint arXiv:1312.6034, 2013. 8

[68] Chen Sun, Abhinav Shrivastava, Carl Vondrick, Kevin Murphy, Rahul Sukthankar, and Cordelia Schmid. Actor-centric relation network. In Proceedings of the European Conference on Computer Vision (ECCV), pages 318-334, 2018. 8

[69] Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. Learning spatiotemporal features with 3d convolutional networks. In 2015 IEEE International Conference on Computer Vision (ICCV), pages 4489-4497. IEEE, 2015. 1, 3

[70] Zhuowen Tu and Xiang Bai. Auto-context and its application to high-level vision tasks and 3d brain image segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 32(10):1744-1757, 2010. 3

[71] Gül Varol, Ivan Laptev, and Cordelia Schmid. Long-term temporal convolutions for action recognition. IEEE transactions on pattern analysis and machine intelligence, 40(6):1510-1517, 2017. 2

[72] Limin Wang, Yuanjun Xiong, Zhe Wang, Yu Qiao, Dahua Lin, Xiaoou Tang, and Luc Van Gool. Temporal segment networks: Towards good practices for deep action recognition. In European conference on computer vision, pages 20-36. Springer, 2016. 1, 3

[73] Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. Non-local neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7794-7803, 2018. 6

[74] Xiaolong Wang and Abhinav Gupta. Videos as space-time region graphs. In Proceedings of the European Conference on Computer Vision (ECCV), pages 399-417, 2018. 3, 6

[75] Philippe Weinzaepfel, Xavier Martin, and Cordelia Schmid. Human action localization with sparse spatial supervision. arXiv preprint arXiv:1605.05197, 2016. 2, 3

[76] Chao-Yuan Wu, Christoph Feichtenhofer, Haoqi Fan, Kaiming He, Philipp Krahenbuhl, and Ross Girshick. Long-term feature banks for detailed video understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 284-293, 2019. 2, 3, 5, 6, 7

[77] Danfei Xu, Yuke Zhu, Christopher B Choy, and Li Fei-Fei. Scene graph generation by iterative message passing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, volume 2, 2017. 3, 5, 8

[78] Jianwei Yang, Jiasen Lu, Stefan Lee, Dhruv Batra, and Devi Parikh. Graph r-cnn for scene graph generation. arXiv preprint arXiv:1808.00191, 2018. 3, 5, 8

[79] Benjamin Yao, Xiong Yang, and Song-Chun Zhu. Introduction to a large-scale general purpose ground truth database: methodology, annotation tool and benchmarks. In International Workshop on Energy Minimization Methods in Computer Vision and Pattern Recognition, pages 169-183. Springer, 2007. 2

[80] Serena Yeung, Olga Russakovsky, Ning Jin, Mykhaylo Andriluka, Greg Mori, and Li Fei-Fei. Every moment counts: Dense detailed labeling of actions in complex videos. International Journal of Computer Vision, 126(2-4):375-389, 2018. 2

[81] Serena Yeung, Olga Russakovsky, Greg Mori, and Li Fei-Fei. End-to-end learning of action detection from frame glimpses in videos. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2678-2687, 2016. 2

[82] Joe Yue-Hei Ng, Matthew Hausknecht, Sudheendra Vijayanarasimhan, Oriol Vinyals, Rajat Monga, and George Toderici. Beyond short snippets: Deep networks for video classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4694-4702, 2015. 2

[83] Jeffrey M Zacks, Todd S Braver, Margaret A Sheridan, David I Donaldson, Abraham Z Snyder, John M Ollinger, Randy L Buckner, and Marcus E Raichle. Human brain activity time-locked to perceptual event boundaries. Nature neuroscience, 4(6):651, 2001. 2

[84] Jeffrey M Zacks, Barbara Tversky, and Gowri Iyer. Perceiving, remembering, and communicating structure in events. Journal of experimental psychology: General, 130(1):29, 2001. 1, 2, 3

[85] Rowan Zellers, Mark Yatskar, Sam Thomson, and Yejin Choi. Neural motifs: Scene graph parsing with global context. arXiv preprint arXiv:1711.06640, 2017. 3, 5, 8

[86] Ji Zhang, Kevin J Shih, Ahmed Elgammal, Andrew Tao, and Bryan Catanzaro. Graphical contrastive losses for scene graph parsing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 11535-11543, 2019. 5, 6, 8

[87] Hang Zhao, Antonio Torralba, Lorenzo Torresani, and Zhicheng Yan. Hacs: Human action clips and segments dataset for recognition and temporal localization. In Proceedings of the IEEE International Conference on Computer Vision, pages 8668-8678, 2019. 1, 2, 3

[88] Bolei Zhou, Alex Andonian, Aude Oliva, and Antonio Torralba. Temporal relational reasoning in videos. In Proceedings of the European Conference on Computer Vision (ECCV), pages 803-818, 2018. 3

[89] Guodong Zhou, Min Zhang, DongHong Ji, and Qiaoming Zhu. Tree kernel-based relation extraction with context-sensitive structured parse tree information. In Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL), 2007. 3
|
| 287 |
+
[90] Linchao Zhu and Yi Yang. Compound memory networks for few-shot video classification. In Proceedings of the European Conference on Computer Vision (ECCV), pages 751-766, 2018. 3
|
| 288 |
+
[91] Tao Zhuo, Zhiyong Cheng, Peng Zhang, Yongkang Wong, and Mohan Kankanhalli. Explainable video action reasoning via prior knowledge and state transitions. In Proceedings of the 27th ACM International Conference on Multimedia, pages 521-529. ACM, 2019. 2
|
actiongenomeactionsascompositionsofspatiotemporalscenegraphs/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9c9b217e52aebc6b81d42d1987a161835cddebe37796bc006413df614951531f
size 513359
actiongenomeactionsascompositionsofspatiotemporalscenegraphs/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:be289d49a1679631141302b4b5c54f685c3304c28982da01f09f39c01e0a2a64
size 398540
actionmodifierslearningfromadverbsininstructionalvideos/8c8a9e3d-94bb-4a0c-abcf-3812587bf4a4_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b85826b4020d23ae582da1afe4078f4245952a54f8cd9716642ede20939d42e5
size 80911
actionmodifierslearningfromadverbsininstructionalvideos/8c8a9e3d-94bb-4a0c-abcf-3812587bf4a4_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b0430ebca4c034364eb335d1ab71d563f6d43bbd3abf942f7949a7aadb16787d
size 103535
actionmodifierslearningfromadverbsininstructionalvideos/8c8a9e3d-94bb-4a0c-abcf-3812587bf4a4_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a13324b48dfd1fe71f78654c3df5355aabd2dbd017cd77070619d288569853b2
size 1137991
actionmodifierslearningfromadverbsininstructionalvideos/full.md
ADDED
@@ -0,0 +1,344 @@
# Action Modifiers: Learning from Adverbs in Instructional Videos

Hazel Doughty<sup>1</sup>, Ivan Laptev<sup>2</sup>, Walterio Mayol-Cuevas<sup>1,3</sup>, Dima Damen<sup>1</sup>

<sup>1</sup>University of Bristol, <sup>2</sup>Inria, École Normale Supérieure, <sup>3</sup>Amazon

# Abstract

We present a method to learn a representation for adverbs from instructional videos, using weak supervision from the accompanying narrations. Key to our method is the fact that the visual representation of an adverb is highly dependent on the action to which it applies, although the same adverb will modify multiple actions in a similar way. For instance, while 'spread quickly' and 'mix quickly' will look dissimilar, we can learn a common representation that allows us to recognize both, among other actions.

We formulate this as an embedding problem, and use scaled dot-product attention to learn from weakly-supervised video narrations. We jointly learn adverbs as invertible transformations operating on the embedding space, so as to add or remove the effect of the adverb. As there is no prior work on weakly supervised learning of adverbs, we gather paired action-adverb annotations from a subset of the HowTo100M dataset for 6 adverbs: quickly/slowly, finely/coarsely, and partially/completely. Our method outperforms all baselines for video-to-adverb retrieval with a performance of 0.719 mAP. We also demonstrate our model's ability to attend to the relevant video parts in order to determine the adverb for a given action.

# 1. Introduction

Instructional videos are a popular type of media, watched by millions of people around the world to learn new skills. Several previous works aim to learn from these videos the key steps necessary to complete a task [1, 30, 45, 62]. However, identifying the steps, or their order, is not all one needs to perform the task well; some steps need to be performed in a certain way to achieve the desired outcome. Take for example the task of making a meringue. An expert would assure you it is critical to add the sugar gradually and avoid over-beating by folding the mixture gently.

This is related to recent efforts on assessing the performance of daily tasks [10, 11, 26]; however, these works do not assess individual actions or identify whether they have been performed as recommended by, say, a recipe. As in the example before, steps with such caveats are often indicated by adverbs describing how actions should be performed. These adverbs (e.g. quickly, gently, ...) generalize to different actions and modify the manner of an action. We thus learn these as action modifiers (Fig. 1).



Figure 1. We learn a joint video-text embedding space from instructional videos and accompanying action-adverb pairs in the narration. Within this space, we learn adverbs as action modifiers: transformations which modify the action's embedding.

To learn action modifiers for a variety of tasks and actions, we utilize the online resource of instructional videos with accompanying narrations. However, this form of supervision is weak and noisy. Not only are the narrations only roughly aligned with the actions in the video, but the narrated actions may not be captured in the video at all. For example, a YouTube instructional video might be narrated as "pour in the cream quickly" while the visuals only show the cream already added. In this case the video would not be useful to learn the adverb 'quickly'.

As the main contribution of this paper, we propose the first method for weakly supervised learning from adverbs, in which we embed relevant video segments in a latent space and learn adverbs as transformations in this space. We collect action-adverb labels from narrations of a subset of tasks in the HowTo100M dataset [33]. The method is evaluated for video-to-adverb retrieval, as well as adverb-to-video retrieval, and shows significant improvements over baselines. Additionally, we present a comprehensive ablation study demonstrating that jointly learning a good action embedding is key to learning action modifiers.

# 2. Related Work

We review works which learn from instructional videos, followed by works using parts-of-speech in video. We then review the related task of object attributes in images, and methods which learn embeddings under weak supervision.

**Instructional Videos.** Movies accompanied by subtitles and scripts have been used for learning from video [12, 13, 25, 47]. However, movies typically focus on talking heads with few object interactions. More recently, instructional videos have become a popular source of datasets [1, 33, 44, 60], with hundreds of online videos of the same task. Narrations are used to learn the steps of complex tasks [1, 18, 30, 42, 45, 62], and more recently for video retrieval [33], visual grounding [17, 19], action segmentation [60] and learning actions through object state changes [2, 14].

In this work, we offer a novel insight into how these instructional videos can be used beyond step identification. Our work utilizes videos from the recently released HowTo100M dataset [33], learning adverbs and their relevance to critical steps in these tasks.

**Learning from Parts-of-Speech in Video.** Several problems lie at the intersection between language and video: captioning [24, 38, 55, 59], retrieval [9, 16, 21, 31, 33, 52, 54] and visual question answering [15, 56, 57, 61]. The majority of these works use LSTMs or GRUs to combine words into sentence-level features. While some works use learned pooling [32] or attention [55, 56, 57], they do not use knowledge of the words' parts-of-speech (PoS).

A few recent works differentiate words by their PoS tags. Xu et al. [54] learn a joint video-text embedding space after detecting (subject, verb, object) triplets in the input caption. Wray et al. [52] perform fine-grained action retrieval by learning a separate embedding for each PoS before combining these embeddings. Both works focus on the verb and noun PoS, as they target action recognition. Alayrac et al. [1] also use verb-noun pairs; the authors use direct object relations to learn unsupervised clusterings of key steps in instructional videos.

While some adverbs are contained in video captioning datasets [24, 59], no prior captioning work models or recognizes these adverbs. The only prior work to utilize adverbs is that of Pang et al. [39], where many adverbs in the ADHA dataset model moods and facial expressions (e.g. 'happily', 'proudly'). That work uses full supervision, including action bounding boxes. Instead, in this work we target adverbs that represent the manner in which an action is performed, using only weak supervision from narrations.

**Object Attributes in Images.** Adverbs of actions are analogous to adjectives of objects. Learning adjectives for nouns has been investigated in the context of recognizing object-attribute pairs [4, 7, 20, 34, 36, 37, 50, 51] from images. Both [7, 34] tackle the problem of contextuality of attributes, where the appearance of an attribute can vastly differ depending on the object it applies to. Chen and Grauman [7] formulate this as transfer learning to recognize unseen object-attribute compositions. Misra et al. [34] learn how to compose separate object and attribute classifiers for novel combinations. Instead of using classifiers to recognize attributes, Nagarajan and Grauman [36] model attributes as a transformation of an object's embedding. Our work is inspired by this approach.

While some works learn attributes for actions [28, 43, 58], these detect combinations of specific attributes (e.g. 'outdoor', 'uses toothbrush') to perform zero-shot recognition and do not consider adverbs as attributes.

**Weakly Supervised Embedding.** Learned embeddings are commonly used for retrieval tasks; however, few works have attempted to learn embeddings under weak supervision [3, 35, 46, 53]. In [3], weak supervision is overcome with a triplet loss that only optimizes distances to the definite negatives and identifies the best matching positive. Two works [35, 46] perform video moment retrieval from text queries without temporal bounds in training. Similar to our approach, both use a form of text-guided attention to find the relevant portion of the video; however, these use the full sentence. In our work, we simultaneously embed the relevant portion of the video while learning how adverbs modify actions. We detail our method next.

# 3. Learning Action Modifiers

The inputs to our model are action-adverb narrations and the accompanying instructional videos. Fig. 2(a) shows a sample instructional video, narrated with "...start by quickly rolling our lemons...", from which we identify the action roll and the adverb quickly (see Sec. 4 for NLP details). After training, our model is able to assess whether videos in the test set, of the same or a different action, have been performed quickly, among other learned adverbs.

We present an overview of our method in Fig. 2. We learn a joint video-text embedding, shown in Fig. 2(b), where the relevant video parts are embedded (blue dot) close to the text representation of the adverb-modified action 'roll quickly' (yellow dot). We review how joint video-text embeddings are typically learned in Sec. 3.1. This section also introduces the notation for the rest of the paper.



Figure 2. (a) Our input is a video $x$ with the weak label $(a, m)$ for the action and adverb respectively. (b) We aim to learn a joint video-text embedding space for adverb and video retrieval, where the embedded video (blue) and action-adverb text representation (yellow) are close. (c) We learn adverbs as action modifiers: transformations in the embedding space. (d) We embed $f'(x, a)$, a visual representation of the relevant video parts, using multi-head scaled dot-product attention where the query is a projection of the action embedding $g(a)$.

Two prime challenges exist in learning the embedding for our problem, i.e. learning from adverbs in instructional videos. The first is disentangling the representation of the action from the adverb, allowing us to learn how the same adverb applies across different actions. We propose to learn adverbs as action modifiers, one per adverb, as in Fig. 2(c). In Sec. 3.2 we introduce these action modifiers, which we represent as transformations in the embedding space.

The second challenge is learning the visual representation from the relevant parts of the video in a weakly-supervised manner, i.e. without annotations of temporal bounds. In Sec. 3.3, we propose a weakly-supervised embedding function that utilizes multi-head scaled dot-product attention. This uses the text embedding of the action as the query to attend to relevant video parts, as shown in Fig. 2(d).

# 3.1. Learning an Action Embedding

Our base model is a joint video-text embedding, as in [32, 52, 54]. Specifically, given a set of video clips $x \in X$ with corresponding action labels $a \in A$, our goal is to obtain two embedding functions, one visual and one textual, $f: X \to E$ and $g: A \to E$, such that $f(x)$ and $g(a)$ are close in the embedding space $E$ while $f(x)$ is distant from other action embeddings $g(a')$. These functions $f$ and $g$ can be optimized with a standard cross-modal triplet loss:

$$
\mathcal{L}_{\text{triplet}} = \max\left(0,\, d(f(x), g(a)) - d(f(x), g(a')) + \beta\right) \quad \text{s.t. } a' \neq a \tag{1}
$$

where $a'$ is an action different from $a$, $d$ is the Euclidean distance and $\beta$ is the margin, set to 1 in all experiments. We use $g(a)$ as the GloVe [41] embedding of the action's verb, and explain how we replace $f(x)$ by $f'(x, a)$ in Sec. 3.3.

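
For illustration, a minimal PyTorch-style sketch of Eq. (1) follows; the batched tensor shapes and how the negative action $a'$ is sampled are assumptions of the sketch, not prescribed by the paper.

```python
import torch
import torch.nn.functional as F

def triplet_loss(f_x, g_a, g_a_neg, beta=1.0):
    """Cross-modal triplet loss of Eq. (1): pull the video embedding f(x)
    towards its action's text embedding g(a), and at least `beta` further
    away from a different action's embedding g(a')."""
    d_pos = F.pairwise_distance(f_x, g_a)      # d(f(x), g(a)), per batch item
    d_neg = F.pairwise_distance(f_x, g_a_neg)  # d(f(x), g(a')), with a' != a
    return torch.clamp(d_pos - d_neg + beta, min=0.0).mean()
```
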
# 3.2. Modeling Adverbs as Action Modifiers

While actions exist without adverbs, adverbs are by definition tied to an action, and only gain a visual representation when attached to one. Although adverbs have a similar effect on different actions, the visual representation is highly dependent on the action. Therefore, we follow prior work from [36] on object-attribute pairs and model adverbs as learned transformations in the video-text embedding space $E$ (Sec. 3.1). As these transformations modify the embedding of the action, we call them action modifiers. We learn an action modifier $O_m$ for each adverb $m \in M$, such that

$$
O_m(z) = W_m z \tag{2}
$$

where $z$ is any point in the embedding space $E$ and $O_m: E \to E$ is a linear transformation with learned weight matrix $W_m$. In Sec. 5, we test other geometric transformations: a fixed translation, a learned translation and a nonlinear transformation. Each transformation $O_m$ can be applied to any text representation $O_m(g(a))$ or video representation $O_m(f(x))$ in $E$ to add the effect of the adverb $m$.

A video $x \in X$, labeled with the action-adverb pair $(a, m)$, contains a visual representation of the adverb-modified action. We thus aim to embed $f(x)$ close to $O_m(g(a))$. This is equivalent to embedding the inverse of the transformation, $O_m^{-1}(f(x))$, near the action $g(a)$. We thus jointly learn our embedding, with the action modifiers $O_m$, using the sum of two triplet losses. The first focuses on the action:

$$
\mathcal{L}_{act} = \max\left(0,\, d(f(x), O_m(g(a))) - d(f(x), O_m(g(a'))) + \beta\right) \quad \text{s.t. } a' \neq a \tag{3}
$$

where $a'$ is a different action, and $d$ and $\beta$ are the distance function and margin as in Sec. 3.1. Similarly, we have a triplet loss that focuses on the adverb, such that:

$$
\mathcal{L}_{adv} = \max\left(0,\, d(f(x), O_m(g(a))) - d(f(x), O_{\overline{m}}(g(a))) + \beta\right) \tag{4}
$$

where $\overline{m}$ is the antonym of the labeled adverb $m$ (e.g. when $m = \text{'quickly'}$, the antonym $\overline{m} = \text{'slowly'}$). We restrict the negative in $\mathcal{L}_{adv}$ to only the antonym to deal with adverbs not being mutually exclusive. For instance, a video labeled 'slice quickly' does not preclude the slicing also being done 'finely'. However, it surely has not been done 'slowly'. We demonstrate the effect of this choice in Sec. 5.

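
A minimal sketch of Eqs. (2)-(4) under stated assumptions: the 300-D embedding size and identity initialization follow Sec. 5, while the module name, the batching and the caller-supplied antonym modifier `O_ant` are illustrative choices of ours.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ActionModifier(nn.Module):
    """One action modifier per adverb: a learned linear map O_m(z) = W_m z
    (Eq. 2), initialized to the identity so it has no effect at first."""
    def __init__(self, dim=300):
        super().__init__()
        self.W = nn.Parameter(torch.eye(dim))

    def forward(self, z):
        return z @ self.W.T

def modifier_losses(f_x, g_a, g_a_neg, O_m, O_ant, beta=1.0):
    """L_act (Eq. 3) contrasts the labeled action a against another action a';
    L_adv (Eq. 4) contrasts the labeled adverb m against its antonym only."""
    d_pos = F.pairwise_distance(f_x, O_m(g_a))
    l_act = torch.clamp(d_pos - F.pairwise_distance(f_x, O_m(g_a_neg)) + beta, min=0.0)
    l_adv = torch.clamp(d_pos - F.pairwise_distance(f_x, O_ant(g_a)) + beta, min=0.0)
    return (l_act + l_adv).mean()
```
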
# 3.3. Weakly Supervised Embedding

All prior works that learn attributes of objects from images [7, 20, 34, 36, 37] utilize fully annotated datasets, where the object the attributes relate to is the only object of interest in the image. In contrast, we aim to learn action modifiers from video in a weakly supervised manner. Our input is untrimmed videos containing multiple consecutive actions. To learn adverbs, we need the visual representation to come only from the video parts relevant to the action (e.g. 'roll' in our Fig. 2 example). We propose using scaled dot-product attention [49], where the embedded action of interest acts as a query to identify relevant video parts.

For each video $x$, we use a temporal window of size $T$, centered around the timestamp of the narrated action-adverb pair, containing video segments $\{x_1, x_2, \dots, x_T\}$. We start from the visual representation of all segments $f(x) = \{f(x_1), \dots, f(x_T)\}$, where $f(\cdot)$ is an I3D network. From this, we wish to learn an embedding of the visual features relevant to the action $a$, which we call $f'(x, a)$. Inspired by [49], we project $f(x)$ into keys $K$ and values $V$:

$$
K = W^K f(x); \quad V = W^V f(x) \tag{5}
$$

We then set the query $Q = W^Q g(a)$ to be the projection of the action embedding, to weight video segments by their relevance to that action. The attention weights are obtained from the dot product of the keys $K$ and the action query $Q$. These then pool the values $V$. Specifically:

$$
H(x, a) = \sigma\left(\frac{(W^Q g(a))^\top W^K f(x)}{\sqrt{T}}\right) W^V f(x) \tag{6}
$$

where $H(x, a)$ is a single attention head and $\sigma$ is the softmax function. We train multiple attention heads such that:

$$
f'(x, a) = W^H \left[H_1(x, a), \dots, H_h(x, a)\right] \tag{7}
$$

where $W^H$ projects the concatenation of the multiple attention heads $H_i(x, a)$ into the embedding space. We learn the $h$ attention head weights $W_i^Q, W_i^K, W_i^V$, as well as the $W^H$ parameters, for our weakly-supervised embedding.

It is important to highlight that these weights are jointly trained with the embedding space $E$, so that $f'(x, a)$ is used instead of $f(x)$ in Equations 3 and 4. We opted to explain our embedding space before detailing how it can be learned in a weakly-supervised manner, to simplify the explanation.

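
To make Eqs. (6)-(7) concrete, here is an unbatched sketch; the projected dimension of 75 and $h = 4$ heads come from Sec. 5, but the function names, the list-of-weights layout and the feature sizes in the comments are our assumptions.

```python
import torch

def attention_head(f_x, g_a, W_q, W_k, W_v):
    """One scaled dot-product head (Eq. 6): the projected action embedding
    queries the T per-second video features to pool action-relevant parts."""
    T = f_x.shape[0]                       # f_x: (T, 2048) I3D features
    q = W_q @ g_a                          # (75,)  query from action embedding
    K = W_k @ f_x.T                        # (75, T) keys
    V = W_v @ f_x.T                        # (75, T) values
    w = torch.softmax(q @ K / T ** 0.5, dim=-1)  # (T,) temporal attention
    return V @ w                           # (75,) pooled visual feature

def f_prime(f_x, g_a, head_weights, W_h):
    """Eq. (7): concatenate the h heads and project into the space E."""
    H = torch.cat([attention_head(f_x, g_a, *w) for w in head_weights])
    return W_h @ H                         # (300,) embedded video f'(x, a)
```
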
# 3.4. Weakly Supervised Inference

Once trained, our model can be used to evaluate cross-modal retrieval of videos and adverbs. For video-to-adverb retrieval, we consider a video query $x$ and the narrated action $a$, and we wish to estimate the adverb $m$. For example, we have a video and wish to find the manner in which the action 'slice' was performed. We use the learned function $f'(x, a)$ to embed the relevant visual representation for action $a$ in $E$. We then rank adverbs by the distance from this embedding to all modified actions $\forall m: O_m(g(a))$.

For adverb-to-video retrieval, we consider an action-adverb pair $(a, m)$ as a query, embed $O_m(g(a))$, e.g. 'slice finely', and calculate the distance from this text representation to all relevant video representations $\forall x: f'(x, a)$. In both cases, this allows us to use $a$ as a query to the weakly supervised embedding, so as to attend to the relevant video parts.

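
Video-to-adverb retrieval thus reduces to a nearest-modified-action ranking, as in the sketch below; it assumes the `ActionModifier` modules from the earlier sketch and unbatched embeddings.

```python
import torch

def rank_adverbs(video_emb, g_a, modifiers):
    """Rank each adverb m by the distance between the embedded video
    f'(x, a) and the modified action O_m(g(a)) (Sec. 3.4)."""
    dists = {m: torch.dist(video_emb, O(g_a)).item() for m, O in modifiers.items()}
    return sorted(dists, key=dists.get)  # closest modified action first
```
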
# 4. Dataset

HowTo100M [33] is a large-scale dataset of instructional videos collected from YouTube. Each video has a corresponding narration from manually-entered subtitles or Automatic Speech Recognition (ASR). No ground-truth is available in terms of correct actions or temporal extents.

To test cross-task generalization, we use the same 83 tasks previously used in [62]. These come from cooking, DIY and car maintenance, and are divided into 65 tasks for training and a disjoint set of 18 tasks for testing. However, in [62], only 30 videos per task were used in training. Instead, we use all videos available for these 65 training tasks, where each task consists of 100-500 videos. In total, we have 24,558 videos in training and 1,280 videos in the test set. For these, we find action-adverb pairs as follows.

We use the accompanying narrations to discover action-adverb pairs, for both training and testing. First, we employ T-BRNN [48] to punctuate the subtitles<sup>1</sup>, then perform Part-of-Speech (POS) tagging with SpaCy's English core web model. We search for verb→adverb relationships with the advmod dependency, indicating the adverb modifies the verb (a sketch of this extraction step follows the figures below). We exclude verbs with VBD (past tense) and VBZ (third person singular) tags, as these correlate with actions not being shown in the video. For example, in 'sprinkle some finely chopped coriander', 'chopped' is tagged with VBD. Similarly, in 'everything fits together neatly', the verb 'fits' is tagged as VBZ. Examples of the (action, adverb) pairs obtained from the pipeline, with the corresponding video snippets, are shown in Fig. 4. Additionally, we manually filter actions and adverbs that are not visual, e.g. 'recommend' and 'normally', respectively. We explored automatic approaches such as word concreteness scores [5], but found these approaches to be unreliable. We also group verbs into clusters to avoid synonyms, as in [8]; i.e. we consider 'put' and 'place' as the same action. From this process, we obtain 15,266 instances of action-adverb pairs.



Figure 3. Log-scaled y-axis shows instances of each adverb plotted per action. We display adverbs against their paired antonym (+/- axis).



... if you turn the bowl upside down slowly they won't come out ...



... mix it well until it is completely dissolved ...



... you want to make sure you fill it up partially ...



... you want to dice it finely ...



Figure 4. Example videos and narrations, highlighting the action and adverb discovered with our NLP pipeline. In some cases the weak timestamp is a good localization of the action (top); however, in others the action is long (second), the timestamp is a poor match (third), or the action is not captured in the video (bottom).

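
The sketch below illustrates the verb→advmod extraction described above; Sec. 4 names only SpaCy's English core web model, so the specific `en_core_web_sm` variant, the lemma-based verb naming and the example output are our assumptions.

```python
import spacy

# "en_core_web_sm" stands in for SpaCy's English core web model;
# the model size is not specified in Sec. 4.
nlp = spacy.load("en_core_web_sm")

def extract_action_adverb_pairs(sentence):
    """Collect (verb lemma, adverb) pairs linked by the advmod dependency,
    skipping VBD (past tense) and VBZ (third person singular) verbs."""
    pairs = []
    for token in nlp(sentence):
        if (token.dep_ == "advmod" and token.head.pos_ == "VERB"
                and token.head.tag_ not in ("VBD", "VBZ")):
            pairs.append((token.head.lemma_, token.text))
    return pairs

print(extract_action_adverb_pairs("Start by quickly rolling our lemons."))
# e.g. [('roll', 'quickly')]
```
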

However, these have a long tail of adverbs that are mentioned only a handful of times. We restrict our learning to 6 commonly used adverbs, which come in 3 pairs of antonyms: 'partially'/'completely', 'quickly'/'slowly' and 'finely'/'coarsely'. These adverbs appear in 263 unique action-adverb pairs with 72 different actions. We show the distribution of adverbs per action in Fig. 3. While our training is noisy, i.e. actions might not appear in the video (refer to Fig. 4, bottom), we clean the test set for accurate evaluation of the method. We only consider test set videos where the action-adverb pair is present in the video and appears within the 20 seconds around the narration timestamp. These correspond to $44\%$ of the original test set, which is comparable to the $50\%$ level of noise reported by the authors in [33].

This results in 5,475 action-adverb pairs in training and 349 in testing. We consider the mean timestamp between the verb and adverb narrations as the weak supervision for the action's location. These action-adverb weak timestamp annotations and accompanying code are publicly available<sup>2</sup>.

# 5. Experiments

We first describe the implementation details of our method, followed by the metrics we use for evaluation. We then present our results against those of baselines and evaluate the contribution of the different components.

**Implementation Details.** We sample all videos at 25 fps and scale them to a height of 256 pixels. We use I3D [6] with 16-frame segments, pre-trained on Kinetics [22], for both RGB and optical flow. We concatenate these to create 2048D features, extracted once per second as in [62], for $T = 20$ seconds around the narration timestamp.

In all experiments, our embedding space $E$ is 300D, the same as the GloVe word representation [41]. We initialize the action embeddings with the verb's GloVe vector, pre-trained on the Wikipedia and Gigaword corpora. The action modifiers $O_m$ are initialized with the identity matrix such that they have no effect at first. For our scaled dot-product attention, $Q$ is of size $75 \times 1$ and $K$ and $V$ are of size $75 \times T$. We use 4 attention heads in $f'(x, a)$.

All our models are trained with the Adam optimizer [23] for 1000 epochs with a batch size of 512 and a learning rate of $10^{-4}$. To aid disentangling the actions and adverbs, we first let the model learn only actions (optimized by $\mathcal{L}_{\text{triplet}}$) for 200 epochs before introducing the action modifiers. The weights of the action modifiers $W_m$ (Eq. 2) are then learned at a slower rate of $10^{-5}$.

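
One way to realize this two-rate schedule is with Adam parameter groups, as in the assumed sketch below; the placeholder parameters merely stand in for the real embedding and modifier weights.

```python
import torch
import torch.nn as nn

# Placeholder parameters standing in for the real weights (illustrative only).
embedding_params = [nn.Parameter(torch.zeros(300, 2048))]           # attention/embedding
modifier_params = [nn.Parameter(torch.eye(300)) for _ in range(6)]  # one W_m per adverb

# Sec. 5 schedule: lr 1e-4 for the embedding, a slower 1e-5 for the modifiers,
# which are only optimized after 200 epochs of action-only training.
optimizer = torch.optim.Adam([
    {"params": embedding_params, "lr": 1e-4},
    {"params": modifier_params, "lr": 1e-5},
])
```
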

**Evaluation Metric.** We report mean Average Precision (mAP) for video-to-adverb and adverb-to-video retrieval. For video-to-adverb, given a video and the narrated action, we rank the relevance of the 6 adverbs. For adverb-to-video, given an adverb query (e.g. 'slowly'), we rank videos by the distance of each video, labelled with its associated action (e.g. 'put'), to the text embedding of the verb-adverb pair (e.g. 'put slowly'), and calculate mAP across the 6 adverbs.

We also report mAP where we restrict the retrieval to the adverb and its antonym, which we refer to as the Antonym setting. This 'Antonym' metric better represents the given labels, so we use it for the ablation study. To clarify: we may have a video narrated 'cut coarsely'. We are thus confident the cut was not performed 'finely'; however, we cannot judge its speed ('quickly' or 'slowly'). In Antonym video-to-adverb, there are only two possible adverbs to retrieve, so we report Precision@1 (P@1), which is the same as binary classification accuracy. Similarly, we report mAP Antonym for adverb-to-video retrieval, where we only rank videos labeled with the adverb or its antonym.

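
Under the Antonym setting, P@1 collapses to a per-video binary check, as in this small sketch (the function and argument names are ours).

```python
def antonym_p_at_1(d_label, d_antonym):
    """Antonym P@1: fraction of videos whose labeled adverb's modified action
    is closer to the video than its antonym's, i.e. binary accuracy."""
    correct = sum(dl < da for dl, da in zip(d_label, d_antonym))
    return correct / len(d_label)

# e.g. antonym_p_at_1([0.4, 0.9], [0.7, 0.5]) == 0.5
```
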
# 5.1. Comparative Results

We first compare our work to baselines. Since ours is the first work to learn from adverbs in videos, we adapt methods that learn attributes of objects in images for comparison, as this is the most similar existing task to ours. In this adaptation, actions replace objects, and adverbs replace attributes/adjectives.

We compare to RedWine [34] and AttributeOp [36], as well as the LabelEmbed baseline proposed in [34], which uses GloVe features in place of SVM classifier weights. We replace the image representation with a uniformly weighted visual representation of the video segments. Similar to our evaluation, we report results when the action is given in testing, referred to as the 'oracle' evaluation in [36]. Furthermore, for a fair comparison, we use only the antonym as the negative in each method's loss, as we do in Eq. 4. AttributeOp proposes several linguistically inspired regularizers; we report the best combination of regularizers for our dataset: the auxiliary and commutative regularizers. We also compare to random chance and a naive binary classifier per adverb pair. This classifier is analogous to the Visual Product baseline used in [34, 36]. We report on both versions of this baseline: a linear SVM which trains a binary one-vs-all classifier per adverb (Classifier-SVM), and a 6-way MLP of two fully connected layers (Classifier-MLP). In video-to-adverb, we rank adverbs by the classifiers' confidence scores, as in [36]. In adverb-to-video, we use the confidence of the corresponding classifier or MLP output to rank videos.

Comparative results are presented in Table 1. Our method outperforms all baselines for video-to-adverb retrieval, both when comparing against all adverbs and when restricting the evaluation to antonym pairs. We see that AttributeOp is the best baseline method, generally performing better than both RedWine and LabelEmbed. The two latter methods work on a fixed visual feature space, and are thus prone to errors when the features are non-separable in that space. We can also see that LabelEmbed performs better than RedWine across all metrics, demonstrating that GloVe features are better representations than SVM classifier weights. While AttributeOp marginally outperforms our approach on adverb-to-video 'All', it underperforms on all other metrics, including our main objective: estimating the correct adverb over its antonym for a video query.

<table><tr><td rowspan="2">Method</td><td colspan="2">video-to-adverb</td><td colspan="2">adverb-to-video</td></tr><tr><td>Antonym</td><td>All</td><td>Antonym</td><td>All</td></tr><tr><td>Chance</td><td>0.500</td><td>0.408</td><td>0.511</td><td>0.170</td></tr><tr><td>Classifier-SVM</td><td>0.605</td><td>0.532</td><td>0.563</td><td>0.264</td></tr><tr><td>Classifier-MLP</td><td>0.685</td><td>0.602</td><td>0.603</td><td>0.304</td></tr><tr><td>RedWine [34]</td><td>0.693</td><td>0.594</td><td>0.595</td><td>0.290</td></tr><tr><td>LabelEmbed [34]</td><td>0.717</td><td>0.621</td><td>0.618</td><td>0.297</td></tr><tr><td>AttributeOp [36]</td><td>0.728</td><td>0.612</td><td>0.597</td><td>0.350</td></tr><tr><td>Ours</td><td>0.808</td><td>0.719</td><td>0.657</td><td>0.329</td></tr></table>

Table 1. Comparative evaluation. Best performance in bold, second best underlined. We report results for both video-to-adverb and adverb-to-video retrieval, restricted to the adverb and its antonym (Antonym) and unrestricted (All).

# 5.2. Qualitative Results

Fig. 5 presents video examples. For each, we show the attention weights for several action queries. Our method is able to successfully attend to segments relevant to the various query actions. The figure also shows the predicted actions, and the predicted adverb when using the ground-truth action as the query. Our method predicts the correct adverb in each case. In the last example, the predicted actions are incorrect, but the method still identifies a relevant segment and that the action was done 'slowly'. We provide further insights into the learned embedding space in the supplementary material.



Figure 5. Qualitative results. Temporal attention values for several action queries. The intensity of the color indicates the attention value. Recall that we use the narrated action to weight the relevance of video segments. Using that, we display the top-5 predicted actions, as well as the correctly predicted adverb for all cases.

# 5.3. Ablation Study

We report 4 ablation studies on various aspects of the method: the choice of action modifier transformation $O_m(\cdot)$, our scaled dot-product attention, the contributions of the loss functions, and the length of the video ($T$). We focus on video-to-adverb retrieval in the ablation, using the Antonym P@1 metric, as this allows us to answer questions like: "was the 'cut' performed 'quickly' or 'slowly'?"

**Action Modifier Representation.** In Table 2 we examine different representations for the action modifiers $O_m(\cdot)$ (Eq. 2). We compare a fixed translation by the GloVe representation of the adverb $m$, which is not learned, to three learned representations: first, a learned translation vector $b_m$ initialized from the GloVe embedding; second, our chosen representation, a linear transformation with matrix $W_m$ as in Eq. 2; and third, a nonlinear transformation implemented as two fully connected layers, the first with a ReLU activation.

<table><tr><td>$O_{m}(z)=$</td><td>Dimension</td><td>Learned</td><td>P@1</td></tr><tr><td>$z+\mathrm{GloVe}(m)$</td><td>1D</td><td></td><td>0.735</td></tr><tr><td>$z+b_{m}$</td><td>1D</td><td>✓</td><td>0.749</td></tr><tr><td>$W_{m}z$</td><td>2D</td><td>✓</td><td>0.808</td></tr><tr><td>$W_{m_{2}}\mathrm{ReLU}(W_{m_{1}}z+b_{m})$</td><td>2D</td><td>✓</td><td>0.742</td></tr></table>

Table 2. Comparison of action modifier representations $O_m(\cdot)$. The linear transformation clearly improves results.

Results show the linear transformation clearly outperforms both the vector translations and the nonlinear transformation. The translation vector does not have enough capacity to represent the complexity of the adverb, while the nonlinear transform is prone to over-fitting.

**Temporal Attention.** In Table 3, we compare our proposed multi-head scaled dot-product attention (Sec. 3.3) with alternative approaches to temporal aggregation and attention. In this comparison, we also report action retrieval results, with video-to-action mAP. That is, given the embedding of the video $f'(x, a)$ queried by the ground-truth action, we rank all actions in the embedding $\forall a: g(a)$ by their distance to the visual query and evaluate the rank of the correct action. Our method does not aim for action retrieval, as it assumes knowledge of the ground-truth action; however, this metric evaluates the quality of the weakly supervised embedding space. Results are compared to:

- Single: uses only the one-second clip at the timestamp.
- Average: uniformly weights the $T$ features.
- Attention from [29]: a widely used class-agnostic attention, computed with two fully connected layers, $f'(x, a) = \sigma(w_1 \tanh(W_2 f(x))) W_3 f(x)$.
- Class-specific Attention: a version of the above with one attention filter per action class.
- Ours w/o two-stage optimization: our attention without the first 200-epoch stage of learning action triplets before introducing the adverbs/modifiers.
- Ours: our attention as described in Sec. 3.3.


Table 3 demonstrates the superior performance of our method for learning action embeddings and, as a consequence, better learning of action modifiers. These results also demonstrate the challenge of weak supervision, with video-to-action retrieval only reaching 0.246 mAP when considering just one second surrounding the narrated action. This improves to 0.692 with our method.

<table><tr><td>Method</td><td>Action (mAP)</td><td>Adverb (P@1)</td></tr><tr><td>Single</td><td>0.246</td><td>0.705</td></tr><tr><td>Average</td><td>0.257</td><td>0.716</td></tr><tr><td>Attention from [29]</td><td>0.235</td><td>0.708</td></tr><tr><td>Class-specific Attention</td><td>0.401</td><td>0.728</td></tr><tr><td>Ours w/o two-stage optimization</td><td>0.586</td><td>0.774</td></tr><tr><td>Ours</td><td>0.692</td><td>0.808</td></tr></table>

Table 3. Comparison of temporal attention methods. We report video-to-action retrieval mAP and video-to-adverb retrieval P@1.



Figure 6. Performance as $T$ increases. Blue (axis and plot) shows video-to-action retrieval mAP, while red shows video-to-adverb retrieval Antonym P@1.

**Loss Functions.** We also evaluate the need for two separate loss functions (Eqs. 3 and 4). As an alternative, we use a single loss whose negative contains a different action, a different adverb, or both. This performs worse by 0.03 P@1. Using both losses, but with any other adverb as the negative, as opposed to only the antonym $\overline{m}$ in Eq. 4, also results in worse performance, by 0.04 P@1.

**Effect of $T$.** In Fig. 6, we evaluate how the length of the video ($T$) extracted around the weak timestamp affects the model (Sec. 3.3). For larger $T$, videos are more likely to contain the relevant action, but also other actions. Our embedding function $f'(x, a)$ is able to ignore other actions in the video, up to a point, and successfully learns to attend to the relevant parts given the query action, resulting in better performance with $T \in \{20, \dots, 30\}$.

**Comparison with Action Localization.** In this work, we perform weakly supervised embedding to learn action modifiers by attending to action-relevant segments. Here, we test whether weakly supervised action localization can be used instead of our proposed attention, to locate key segments before learning action modifiers.

We use the published code of two state-of-the-art weakly supervised action localization methods: W-TALC [40] and CMCS [27]. First, we test the output of these methods with a binary adverb-antonym classifier (Classifier-MLP, as in Sec. 5.1). We also test these methods in combination with our embedding and action modifier transformations. For this, we use the methods' predicted action-relevant segments, and average their representation to replace $f'(x, a)$ (Avg). Finally, we combine these relevant segments with our scaled dot-product attention (SDP).

<table><tr><td>Method</td><td>Attention</td><td>Adverb Rep</td><td>P@1</td></tr><tr><td rowspan="3">W-TALC [40]</td><td>Avg</td><td>Classifier-MLP</td><td>0.705</td></tr><tr><td>Avg</td><td>Action Modifiers</td><td>0.739</td></tr><tr><td>SDP</td><td>Action Modifiers</td><td>0.768</td></tr><tr><td rowspan="3">CMCS [27]</td><td>Avg</td><td>Classifier-MLP</td><td>0.696</td></tr><tr><td>Avg</td><td>Action Modifiers</td><td>0.699</td></tr><tr><td>SDP</td><td>Action Modifiers</td><td>0.705</td></tr><tr><td>Ours</td><td>SDP</td><td>Action Modifiers</td><td>0.808</td></tr></table>

Table 4. Comparison of our method (Ours) to weakly supervised action localization methods, with and without our scaled dot-product attention (SDP) and action modifier representations.

From Table 4 we can conclude that using the output of a weakly-supervised localization method is insufficient, and our joint optimization performs best. Worth noting, localizing the action with W-TALC followed by averaging the relevant segments outperforms averaging all segments (0.739 vs. 0.716 from Table 3). This shows that W-TALC is capable of finding some relevant segments; this is further improved by our scaled dot-product attention.

# 6. Conclusion

This paper presents a weakly supervised method to learn from adverbs in instructional videos. Our method learns to attend to and embed the relevant part of the video with scaled dot-product attention, using the narrated action as a query. The method then learns action modifiers as linear transformations on the embedded actions, shared between actions. We train and evaluate our method on parsed action-adverb pairs sourced from YouTube videos of 83 tasks. Results demonstrate that our method outperforms all baselines, achieving 0.808 mAP for video-to-adverb retrieval when considering the adverb versus its antonym.

Future work will involve learning from few-shot examples in order to represent a greater variety of adverbs, as well as exploring applications that give feedback to people guided by instructional videos or written instructions.

**Acknowledgements:** This work was supported by an EPSRC DTP, EPSRC GLANCE (EP/N013964/1), the Louis Vuitton ENS Chair on Artificial Intelligence, the MSR-Inria joint lab and the French government program reference ANR-19-P3IA-0001 (PRAIRIE 3IA Institute). Part of this work was conducted during H. Doughty's internship at the INRIA Willow team. This work uses publicly available datasets.

# References
|
| 275 |
+
|
| 276 |
+
[1] Jean-Baptiste Alayrac, Piotr Bojanowski, Nishant Agrawal, Josef Sivic, Ivan Laptev, and Simon Lacoste-Julien. Unsupervised learning from narrated instruction videos. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4575-4583, 2016. 1, 2
|
| 277 |
+
[2] Jean-Baptiste Alayrac, Ivan Laptev, Josef Sivic, and Simon Lacoste-Julien. Joint discovery of object states and manipulation actions. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 2127-2136, 2017. 2
|
| 278 |
+
[3] Relja Arandjelovic, Petr Gronat, Akihiko Torii, Tomas Pajdla, and Josef Sivic. Netvlad: Cnn architecture for weakly supervised place recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5297-5307, 2016. 2
|
| 279 |
+
[4] Damian Borth, Rongrong Ji, Tao Chen, Thomas Breuel, and Shih-Fu Chang. Large-scale visual sentiment ontology and detectors using adjective noun pairs. In Proceedings of the 21st ACM international conference on Multimedia, pages 223-232, 2013. 2
|
| 280 |
+
[5] Marc Brysbaert, Amy Beth Warriner, and Victor Kuperman. Concreteness ratings for 40 thousand generally known english word lemmas. Behavior research methods, 46(3):904-911, 2014. 5
|
| 281 |
+
[6] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6299-6308, 2017. 5
|
| 282 |
+
[7] Chao-Yeh Chen and Kristen Grauman. Inferring analogous attributes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 200-207, 2014. 2, 4
|
| 283 |
+
[8] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, and Michael Wray. Scaling egocentric vision: The epic-kitchens dataset. In Proceedings of the European Conference on Computer Vision (ECCV), 2018. 5
|
| 284 |
+
[9] Jianfeng Dong, Xirong Li, Chaoxi Xu, Shouling Ji, Yuan He, Gang Yang, and Xun Wang. Dual encoding for zeroexample video retrieval. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 9346-9355, 2019. 2
|
| 285 |
+
[10] Hazel Doughty, Dima Damen, and Walterio Mayol-Cuevas. Who's better? who's best? pairwise deep ranking for skill determination. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6057-6066, 2018. 1
|
| 286 |
+
[11] Hazel Doughty, Walterio Mayol-Cuevas, and Dima Damen. The pros and cons: Rank-aware temporal attention for skill determination in long videos. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 7862-7871, 2019. 1
|
| 287 |
+
[12] Olivier Duchenne, Ivan Laptev, Josef Sivic, Francis R Bach, and Jean Ponce. Automatic annotation of human actions in video. In Proceedings of the IEEE International Conference
|
| 288 |
+
|
| 289 |
+
on Computer Vision (ICCV), volume 1, pages 1491-1498, 2009. 2
|
| 290 |
+
[13] Mark Everingham, Josef Sivic, and Andrew Zisserman. Hello! my name is... buffy"-automatic naming of characters in tv video. In British Machine Vision Conference (BMVC), volume 2, page 6, 2006. 2
|
| 291 |
+
[14] Alireza Fathi and James M Rehg. Modeling actions through state changes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2579-2586, 2013. 2
|
| 292 |
+
[15] Jiyang Gao, Runzhou Ge, Kan Chen, and Ram Nevatia. Motion-appearance co-memory networks for video question answering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2
|
| 293 |
+
[16] Lisa Anne Hendricks, Oliver Wang, Eli Shechtman, Josef Sivic, Trevor Darrell, and Bryan Russell. Localizing moments in video with natural language. In Proceedings of the IEEE International Conference on Computer Vision (CVPR), pages 5803-5812, 2017. 2
|
| 294 |
+
[17] De-An Huang, Shyamal Buch, Lucio Dery, Animesh Garg, Li Fei-Fei, and Juan Carlos Niebles. Finding "it": Weakly-supervised reference-aware visual grounding in instructional videos. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2
|
| 295 |
+
[18] De-An Huang, Li Fei-Fei, and Juan Carlos Niebles. Connectionist temporal modeling for weakly supervised action labeling. In Proceedings of the European Conference on Computer Vision (ECCV), pages 137–153. Springer, 2016. 2
|
| 296 |
+
[19] De-An Huang, Joseph J Lim, Li Fei-Fei, and Juan Carlos Niebles. Unsupervised visual-linguistic reference resolution in instructional videos. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2183–2192, 2017. 2
|
| 297 |
+
[20] Phillip Isola, Joseph J Lim, and Edward H Adelson. Discovering states and transformations in image collections. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1383-1391, 2015. 2, 4
|
| 298 |
+
[21] Mihir Jain, Jan C van Gemert, Thomas Mensink, and Cees GM Snoek. Objects2action: Classifying and localizing actions without any video example. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 4588-4596, 2015. 2
|
| 299 |
+
[22] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017. 5
|
| 300 |
+
[23] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. International Conference on Learning Representations (ICLR), 2015. 5
[24] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2017. 2
[25] Ivan Laptev, Marcin Marszalek, Cordelia Schmid, and Benjamin Rozenfeld. Learning realistic human actions from movies. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1-8. IEEE, 2008. 2
[26] Zhenqiang Li, Yifei Huang, Minjie Cai, and Yoichi Sato. Manipulation-skill assessment from videos with spatial attention network. In Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCVW), 2019. 1
[27] Daochang Liu, Tingting Jiang, and Yizhou Wang. Completeness modeling and context separation for weakly supervised temporal action localization. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1298-1307, 2019. 8
[28] Jingen Liu, Benjamin Kuipers, and Silvio Savarese. Recognizing human actions by attributes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 3337-3344. IEEE, 2011. 2
[29] Xiang Long, Chuang Gan, Gerard De Melo, Jiajun Wu, Xiao Liu, and Shilei Wen. Attention clusters: Purely attention based local feature integration for video classification. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 7834-7843, 2018. 7, 8
[30] Jonathan Malmaud, Jonathan Huang, Vivek Rathod, Nick Johnston, Andrew Rabinovich, and Kevin Murphy. What's cookin'? Interpreting cooking videos using text, speech and vision. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics (NAACL), 2015. 1, 2
[31] Pascal Mettes and Cees GM Snoek. Spatial-aware object embeddings for zero-shot localization and classification of actions. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 4443-4452, 2017. 2
[32] Antoine Miech, Ivan Laptev, and Josef Sivic. Learnable pooling with context gating for video classification. arXiv preprint arXiv:1706.06905, 2017. 2, 3
[33] Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. HowTo100M: Learning a text-video embedding by watching hundred million narrated video clips. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019. 2, 4, 5
[34] Ishan Misra, Abhinav Gupta, and Martial Hebert. From red wine to red tomato: Composition with context. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1792-1801, 2017. 2, 4, 6
[35] Niluthpol Chowdhury Mithun, Sujoy Paul, and Amit K Roy-Chowdhury. Weakly supervised video moment retrieval from text queries. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 11592-11601, 2019. 2
[36] Tushar Nagarajan and Kristen Grauman. Attributes as operators: Factorizing unseen attribute-object compositions. In Proceedings of the European Conference on Computer Vision (ECCV), pages 169-185, 2018. 2, 3, 4, 6
[37] Zhixiong Nan, Yang Liu, Nanning Zheng, and Song-Chun Zhu. Recognizing unseen attribute-object pair with generative model. In The Thirty-Third AAAI Conference on Artificial Intelligence, 2019. 2, 4
[38] Yingwei Pan, Ting Yao, Houqiang Li, and Tao Mei. Video captioning with transferred semantic attributes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6504-6512, 2017. 2
[39] Bo Pang, Kaiwen Zha, and Cewu Lu. Human action adverb recognition: ADHA dataset and a three-stream hybrid model. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 2325-2334, 2018. 2
[40] Sujoy Paul, Sourya Roy, and Amit K Roy-Chowdhury. W-TALC: Weakly-supervised temporal activity localization and classification. In Proceedings of the European Conference on Computer Vision (ECCV), pages 563-579, 2018. 8
[41] Jeffrey Pennington, Richard Socher, and Christopher Manning. GloVe: Global vectors for word representation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, 2014. 3, 5
[42] Alexander Richard, Hilde Kuehne, and Juergen Gall. Action sets: Weakly supervised action segmentation without ordering constraints. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5987-5996, 2018. 2
[43] Amir Rosenfeld and Shimon Ullman. Action classification via concepts and attributes. In 2018 24th International Conference on Pattern Recognition (ICPR), pages 1499-1505. IEEE, 2018. 2
[44] Fadime Sener and Angela Yao. Zero-shot anticipation for instructional activities. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 862-871, 2019. 2
[45] Ozan Sener, Amir R Zamir, Silvio Savarese, and Ashutosh Saxena. Unsupervised semantic parsing of video collections. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 4480-4488, 2015. 1, 2
[46] Reuben Tan, Huijuan Xu, Kate Saenko, and Bryan A Plummer. wMAN: Weakly-supervised moment alignment network for text-based video segment retrieval. arXiv preprint arXiv:1909.13784, 2019. 2
[47] Makarand Tapaswi, Martin Bäuml, and Rainer Stiefelhagen. Book2Movie: Aligning video scenes with book chapters. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015. 2
[48] Ottokar Tilk and Tanel Alumäe. Bidirectional recurrent neural network with attention mechanism for punctuation restoration. In Interspeech 2016, 2016. 4
[49] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in Neural Information Processing Systems (NeurIPS), pages 5998-6008, 2017. 4
[50] Xiaoyang Wang and Qiang Ji. A unified probabilistic approach modeling relationships between attributes and objects. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 2120-2127, 2013. 2
[51] Yang Wang and Greg Mori. A discriminative latent model of object classes and attributes. In Proceedings of the European Conference on Computer Vision (ECCV), pages 155-168. Springer, 2010. 2
[52] Michael Wray, Diane Larlus, Gabriela Csurka, and Dima Damen. Fine-grained action retrieval through multiple parts-of-speech embeddings. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2019. 2, 3
[53] Jian Xu, Chunheng Wang, Cunzhao Shi, and Baihua Xiao. Weakly supervised soft-detection-based aggregation method for image retrieval. arXiv preprint arXiv:1811.07619, 2018. 2
[54] Ran Xu, Caiming Xiong, Wei Chen, and Jason J Corso. Jointly modeling deep video and compositional text to bridge vision and language in a unified framework. In Twenty-Ninth AAAI Conference on Artificial Intelligence, 2015. 2, 3
[55] Li Yao, Atousa Torabi, Kyunghyun Cho, Nicolas Ballas, Christopher Pal, Hugo Larochelle, and Aaron Courville. Describing videos by exploiting temporal structure. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 4507-4515, 2015. 2
[56] Dongfei Yu, Jianlong Fu, Tao Mei, and Yong Rui. Multi-level attention networks for visual question answering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4709-4717, 2017. 2
[57] Youngjae Yu, Hyungjin Ko, Jongwook Choi, and Gunhee Kim. End-to-end concept word detection for video captioning, retrieval, and question answering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 3165-3173, 2017. 2
[58] Rowan Zellers and Yejin Choi. Zero-shot activity recognition with verb attribute induction. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 946-958, 2017. 2
[59] Kuo-Hao Zeng, Tseng-Hung Chen, Juan Carlos Niebles, and Min Sun. Title generation for user generated videos. In Proceedings of the European Conference on Computer Vision (ECCV), pages 609-625. Springer, 2016. 2
[60] Luowei Zhou, Chenliang Xu, and Jason J Corso. Towards automatic learning of procedures from web instructional videos. In Thirty-Second AAAI Conference on Artificial Intelligence, 2018. 2
[61] Linchao Zhu, Zhongwen Xu, Yi Yang, and Alexander G Hauptmann. Uncovering the temporal context for video question answering. International Journal of Computer Vision, 124(3):409-421, 2017. 2
[62] Dimitri Zhukov, Jean-Baptiste Alayrac, Ramazan Gokberk Cinbis, David Fouhey, Ivan Laptev, and Josef Sivic. Cross-task weakly supervised learning from instructional videos. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 3537-3545, 2019. 1, 2, 4, 5
actionmodifierslearningfromadverbsininstructionalvideos/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d97c9aeaf88c6c9890bbc18b844a21e888b2461659257a2a52ba9a7b80f3cf1
+size 653848
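Each ADDED entry above stores only a git-lfs pointer (version, oid, size) in place of the large binary; the real blob is fetched by the LFS smudge filter at checkout. As a rough sketch of what those three lines encode, the snippet below parses a pointer file into its fields. `parse_pointer` and the example path are illustrative assumptions, not part of the repository's tooling.

```python
# Minimal sketch: reading back a git-lfs pointer file like the ones added
# in this batch. The three keys (version, oid, size) follow the git-lfs
# pointer spec shown above; parse_pointer is a hypothetical helper.
from pathlib import Path


def parse_pointer(path: str) -> dict:
    """Parse a git-lfs pointer file into its key/value fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if line.strip():                      # skip blank lines
            key, _, value = line.partition(" ")
            fields[key] = value
    return fields


# Hypothetical usage against one of the pointers added in this batch:
ptr = parse_pointer(
    "actionmodifierslearningfromadverbsininstructionalvideos/images.zip"
)
algo, _, digest = ptr["oid"].partition(":")   # e.g. ("sha256", "5d97c9ae...")
print(algo, digest, int(ptr["size"]))         # size of the real blob in bytes
```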
actionmodifierslearningfromadverbsininstructionalvideos/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:364b887255ad7ce4a85f320242ed96794bab224e0e391928f10acf7916328759
+size 442615
actionsegmentationwithjointselfsupervisedtemporaldomainadaptation/2e7f2736-36f2-4099-8fc8-db04ec170c4f_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d6f6e8fa1485594225449717f8fbdc41192b34d84423391d43c040bf4d93efd
+size 73712
actionsegmentationwithjointselfsupervisedtemporaldomainadaptation/2e7f2736-36f2-4099-8fc8-db04ec170c4f_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:336ea6d92f8c800259d9edb1d8744989acd78ab18ab9116b7ece5f1d59058da0
+size 90098