% last updated in April 2002 by Antje Endemann
% Based on CVPR 07 and LNCS, with modifications by DAF, AZ and elle, 2008 and AA, 2010, and CC, 2011; TT, 2014; AAS, 2016

\documentclass[runningheads]{llncs}
\usepackage{graphicx}
\usepackage{amsmath,amssymb} % define this before the line numbering.
\usepackage{ruler}
\usepackage{color}

\usepackage[pagebackref=true,breaklinks=true,letterpaper=true,colorlinks,bookmarks=false]{hyperref}

\usepackage[width=122mm,left=12mm,paperwidth=146mm,height=193mm,top=12mm,paperheight=217mm]{geometry}

\usepackage{cite}

\DeclareMathOperator{\sign}{sign} % upright operator, not italic letter-product
\newcommand{\ternary}{f_{\mathrm{ternary}}}
\newcommand{\imTcol}{\operatorname{ten2mat}}
\newcommand{\colTim}{\operatorname{mat2ten}}
\newcommand{\argmin}{\mathop{\arg\min}}
\newcommand{\argmax}{\mathop{\arg\max}}
\newcommand{\bitcount}{\mathbf{bitcount}}
\newcommand{\XORop}{\mathbf{XOR}}
\newcommand{\ANDop}{\mathbf{AND}}
\newcommand{\RR}{\mathbb{R}}
\newcommand{\NA}{-}
\usepackage[normalem]{ulem}
\usepackage{subfigure}
\usepackage{booktabs}
\usepackage{multirow}
\usepackage{algorithm}  
\usepackage{algorithmic} 
\usepackage{footnote}

\usepackage{pifont}% http://ctan.org/pkg/pifont
\newcommand{\cmark}{\ding{51}}%
\newcommand{\xmark}{\ding{55}}%

\newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}}  

\usepackage{xspace}

\makeatletter
\DeclareRobustCommand\onedot{\futurelet\@let@token\@onedot}
\def\@onedot{\ifx\@let@token.\else.\null\fi\xspace}

\def\eg{\emph{e.g}\onedot} 
\def\Eg{\emph{E.g}\onedot}
\def\ie{\emph{i.e}\onedot} 
\def\Ie{\emph{I.e}\onedot}
\def\cf{\emph{c.f}\onedot} 
\def\Cf{\emph{C.f}\onedot}
\def\etc{\emph{etc}\onedot} 
\def\vs{\emph{vs}\onedot}
\def\wrt{w.r.t\onedot} 
\def\dof{d.o.f\onedot}
\def\etal{\emph{et al}\onedot}
\makeatother

\renewcommand{\paragraph}{\textbf}
\addtolength{\abovecaptionskip}{-.3cm}
\addtolength{\belowcaptionskip}{-.3cm}
%
\addtolength{\parskip}{-0.02cm}
%
\addtolength{\textfloatsep}{-0.4cm}
%\addtolength{\floatsep}{-0.4cm}
\renewcommand\floatpagefraction{.9}
\renewcommand\topfraction{.9}
\renewcommand\bottomfraction{.9}
\renewcommand\textfraction{.1}
\setcounter{totalnumber}{50}
\setcounter{topnumber}{50}
\setcounter{bottomnumber}{50}
% Commands like \normalsize, \small and \footnotesize could change the
% length of \abovedisplayskip, \belowdisplayskip etc.
%
\expandafter\def\expandafter\normalsize\expandafter{%
	\normalsize\setlength\abovedisplayskip{3pt}}
\expandafter\def\expandafter\normalsize\expandafter{%
	\normalsize\setlength\belowdisplayskip{3pt}}

\begin{document}
	% \renewcommand\thelinenumber{\color[rgb]{0.2,0.5,0.8}\normalfont\sffamily\scriptsize\arabic{linenumber}\color[rgb]{0,0,0}}
	% \renewcommand\makeLineNumber {\hss\thelinenumber\ \hspace{6mm} \rlap{\hskip\textwidth\ \hspace{6.5mm}\thelinenumber}}
	% \linenumbers
	\pagestyle{headings}
	\mainmatter
	\def\ECCV18SubNumber{1920}  % Insert your submission number here
	
	\title{TBN: Convolutional Neural Network with Ternary Inputs and Binary Weights} % Replace with your title
	
	\titlerunning{ECCV-18 submission ID \ECCV18SubNumber}
	
	\authorrunning{ECCV-18 submission ID \ECCV18SubNumber}
	
	\author{Anonymous ECCV submission}
	\institute{Paper ID \ECCV18SubNumber}
	
	
	\maketitle
	\begin{abstract}
		%%% at least 70 and at most 150 words.
		%% 目的,方法,结果,结论
		%% 背景
		Despite the remarkable success of Convolutional Neural Networks (CNNs) on generalized visual tasks, high computational and memory costs restrict their comprehensive applications on consumer electronics (\emph{e.g.}, portable or smart wearable devices). Recent advancements in binarized networks have demonstrated progress in reducing computational and memory costs; however, they suffer from significant performance degradation compared to their full-precision counterparts. Thus, a highly-economical yet effective CNN that is authentically applicable to consumer electronics is in urgent need. In this work, we propose a Ternary-Binary Network (TBN), which provides an efficient approximation to standard CNNs. Based on an accelerated ternary-binary matrix multiplication, TBN replaces the arithmetical operations in standard CNNs with efficient \textbf{XOR}, \textbf{AND} and \textbf{bitcount} operations, and thus provides an optimal tradeoff between memory, efficiency and performance. TBN demonstrates its consistent effectiveness when applied to various CNN architectures (\emph{e.g.}, AlexNet and ResNet) on multiple datasets of different scales, and provides $32\times$ memory savings and $40\times$ faster convolutional operations. Meanwhile, TBN can outperform the state-of-the-art binarized network, XNOR-Network, by up to 5.5\% (top-1 accuracy) on the ImageNet classification task, and up to 4.4\% (mAP score) on the PASCAL VOC object detection task.
		%Using binary operations to accelerate and compress convolutional neural networks has proven to be an effective method. 
		%% 方法
		%In this paper, we propose TBN, a method to train convolutional neural networks with ternary inputs and binary weights.
		%% 优势
		% 1)
		%TBN relieves the problem of serious performance degradation suffered from previous works.
		%TBNs use ternary inputs to reduce the quantization error and improve performance. 
		% 2)
		%Binary weights afford $\sim 32\times$ memory savings. 
		% 3) 
		%And accelerated by binary operations, TBNs archive $\sim 40\times$ faster convolutional operations.
		%% 实验结果
		%We evaluate our approach on the classification tasks and object detection tasks.
		% ImageNet
		%On the ImageNet classification task, our Ternary-Binary Network outperforms XNOR-Network by around 5.5\% and 2.9\% top-1 accuracy on the AlexNet and ResNet models, respectively. 
		% PASCAL VOC
		%On the PASCAL VOC object detection task, our method also has better performance (4.4\% mAPs) than XNOR-Network on Faster R-CNN and SSD architecture.
		
		% 需不需要 keywords
		\keywords{Convolutional Neural Network, TBN, Acceleration, Compression, Binary  operation}
	\end{abstract}
	
	%%%%%%%%% BODY TEXT
	\section{Introduction}
	%% CNN优点与缺陷    
	Along with the overwhelming success of deep learning, convolutional neural networks (CNNs) have demonstrated their capabilities in various computer vision tasks \cite{AlexNet,VGG,GoogLeNet,ResNet,dai2016r,lecun2015deep,liu2016ssd,ren2015faster,girshick2014rich,girshick2015fast,long2015fully,pinheiro2015learning}. Effective CNNs normally contain millions of weight parameters, and require billions of high precision operations to be performed for a single classification task. Consequently, either training a CNN model with large-scale data or deploying a CNN model for real-time prediction task has rigid hardware demands (such as GPUs and TPUs) for the overheads in both storage and computing. However, such rigid hardware demands restrict CNNs' comprehensive applications on consumer electronics, such as virtual reality (VR), augmented reality (AR), smart wearable devices and self-driving cars. 
	
	%% 对CNNs缺陷的研究, 及改进方法的缺点
	%% 研究动机 优点
	Significant research efforts have been paid on how to reduce the computational and memory costs of CNNs. Existing CNN lightweighting techniques include pruning \cite{han2015deep,han2015learning,wen2016learning}, 
	quantization \cite{soudry2014expectation,wu2016quantized,zhou2017incremental,courbariaux2014low,gupta2015deep,lin2016fixed,lin2015neural}, 
	factorization \cite{lebedev2014speeding,jaderberg2014speeding,jin2014flattened,kim2015compression,wang2016accelerating,zhang2016accelerating,ambai2016ternary}, 
	network binarization \cite{BNN,XNOR},
	distilling \cite{hinton2015distilling} and 
	others \cite{denil2013predicting,Q-CNN}. Since network binarization can lower 32-bit full-precision values down to 1-bit binary values and allow efficient convolution operations, it has the most potential for lightweighting the network for practical usages on portable devices. Recent progress in binarized CNNs \cite{XNOR,BNN} has provided evidence of successfully reducing the computational and memory costs with the binary replacement. In CNNs, two types of values can be binarized: 1) network weights in the convolutional layers and the fully-connected layers, and 2) input signals to the convolutional layers and the fully-connected layers. Binarizing the network weights can directly result in $\sim 32 \times$ memory saving over the real-valued counterparts, and meanwhile bring $\sim 2 \times$ computational efficiency by avoiding the multiplication operation in convolutions. On the other hand, simultaneously binarizing both weights and the input signals can result in $58 \times$ computational efficiency by replacing the arithmetical operations in convolutions with XNOR and bitcount operations. Despite the significant cost reduction, noticeable accuracy degradation of the binarized CNNs introduced new performance issues for practical usages. Undoubtedly, such performance degradation is due to the quantization errors when brutally binarizing real values in both network weights and layer-wise inputs.
	
	\begin{figure}[t]
		\begin{center}
			\includegraphics[width=\textwidth]{pics/network_struct.pdf}
			\vspace{-0.3cm}
			\caption{The illustration of TBN, which is an efficient variation of convolutional neural networks. The weight filters of TBN have binary values and its inputs contain ternary values. Binary operations are used to accelerate the convolution operation. Our TBN is not only efficient in terms of memory and computation but also has good performance. This helps to use CNNs on resource-limited portable devices.}
			\label{fig:network_struct}
		\end{center}
		\vspace{-0.2cm}
	\end{figure}
	In this work, we aim to improve the performance of binarized CNNs with ternary inputs to the convolutional and fully-connected layers. The ternary inputs constrain input signal values into $-1$, $0$, and $1$, and can essentially reduce the quantization error when binarizing full-precision input signals. By incorporating ternary layer-wise inputs with binary network weights, we propose a Ternary-Binary Network (TBN) that provides an optimal tradeoff between the performance and computational efficiency. The illustration of the pipeline of proposed TBN approach 
	% as well as the quantitative comparisons with the standard full-precision CNNs and binarized CNNs 
	can be found in Fig. \ref{fig:network_struct}. In addition, an efficient threshold-based approximated solution is introduced to minimize the quantization error between the full-precision network weights and the binary weights along with a scaling factor, and an accelerated ternary-binary dot product method is introduced using simple bitwise operations (\emph{i.e.}, \textbf{XOR} and \textbf{AND}) and the \textbf{bitcount} operation. Specifically, 
	%quantizing a single-precision ($32$ bits) input signal into a ternary input ($2$ bits) can introduce $\sim 16 \times$ compression, and 
	TBN can provide $\sim 32 \times$ memory saving and $40\times$ speedup over its real-valued CNN counterparts. Compared to the state-of-the-art binarized network XNOR-Network \cite{XNOR}, with an identical memory cost and slightly sacrificed efficiency, TBN can outperform XNOR-Network on both the image classification task and the object detection task. The main contributions of this paper can be summarized as follows:
	\vspace{-1ex}
	\begin{itemize}
		\item We propose a Ternary-Binary Network, which for the first time elegantly incorporates the ternary layer-wise inputs with binary weights and provides an optimal tradeoff between the performance and computational efficiency.
		
		\item We introduce an accelerated ternary-binary dot product method that employs simple \textbf{XOR}, \textbf{AND} and \textbf{bitcount} operations. As a result, TBN can provide $\sim 32 \times$ memory saving and $40\times$ speedup over its real-valued CNN counterparts.
		
		
		\item By incorporating with various CNN architectures (including \emph{LeNet-5},  \emph{VGG-7}, \emph{AlexNet}, \emph{ResNet-18} and \emph{ResNet-34}), TBN can achieve the promising image classification and object detection performance on multiple datasets among quantized neural networks. Particularly, TBN outperforms the state-of-the-art binarized CNN approach XNOR-Network \cite{XNOR} by up to 5.5\% (top-1 accuracy) on the ImageNet classification task, and up to 4.4\% (mAP score) on the PASCAL VOC object detection task.
		
	\end{itemize}
	
	\section{Related Work}
	Abundant research efforts have been devoted to lightweighting standard full-precision CNNs through quantization. In this section, we list some recent relevant work along such a research line, and discuss how they relate to the proposed TBN. We roughly divide these works into the following two categories: 1) quantized weights with real-value inputs and 2) quantized weights and inputs. 
	
	\begin{table}[t]
		\centering
		\caption{Comparisons between TBN and its closely related methods in terms of input and weight types, numbers of multiply-accumulate operations (MACs) and binary operations required by matrix multiplication, speedup ratios and operation types. More detailed statistical analysis can be found in Section \ref{sec:efficiency}.}
		\vspace{-2mm}
		\label{table:efficiency}
		\resizebox{\textwidth}{!}{ \begin{tabular}{c|ccccccc}
				\toprule \toprule
				\multicolumn{2}{c}{Methods}  & Inputs & Weights & MACs & Binary operations & Speedup & Operations\\
				\midrule
				\midrule
				& Full-precision & $\RR$ & $\RR$ 	& $n \times m \times q$ & 0 & $1 \times $ & +, -, x \\
				\midrule
				\multirow{4}{*}{\rotatebox[]{90}{\tabincell{c}{Quantize\\Weights} }}
				& TTQ \cite{TTQ}  & $\RR$ & $\{-\alpha^{n}, 0, +\alpha^{p}\}$ & $n \times m \times q$ & 0 & $\sim 2 \times $ & +,-\\
				& TWN \cite{TWN}  & $\RR$ & $\{-\alpha, 0, +\alpha\}$        & $n \times m \times q$ & 0 & $\sim 2 \times $ & +,-\\
				& BWN \cite{XNOR} & $\RR$ & $\{-\alpha, +\alpha\}$           & $n \times m \times q$ & 0 & $\sim 2 \times $ & +,-\\
				& BC \cite{BC}    & $\RR$ & $\{-1, +1\}$                     & $n \times m \times q$ & 0 & $\sim 2 \times $ &+,-\\
				
				\midrule
				\multirow{6}{*}{\rotatebox[]{90}{\tabincell{c}{Quantize Inputs\\and Weights} }}
				
				& TNN \cite{TNN} & $\{-1, 0, 1\}$ &  $\{-1, 0, 1\}$ &  0 & $8 \times n \times m \times q $ & $15\times$ & AND, bitcount\\
				
				& GXNOR \cite{GXNOR} & $\{-1, 0, 1\}$ &  $\{-1, 0, 1\}$ & 0 & $5 \times n \times m \times q $ & $15\times$  & AND, bitcount\\
				
				& BNN \cite{BNN}& $\{-1, +1\}$ & $\{-1, +1\}$ & 0 & $2 \times n \times m \times q $  & $64\times$  &  XOR, bitcount\\
				
				& XNOR \cite{XNOR}& $\{-\beta, +\beta\}$ & $\{-\alpha, +\alpha\}$ & $2 \times n \times m $ & $2 \times n \times m \times q $ & $58\times$ & XOR, bitcount \\
				
				& HORQ \cite{HORQ} & $\{-\beta, +\beta\} \times 2$ & $\{-\alpha, +\alpha\}$ & $4 \times n \times m$ & $4 \times n \times m \times q $ & $29\times$  & XOR, bitcount\\
				
				& DoReFa$^{*}$ \cite{DoReFa} & $\{0, 1\} \times 2$ & $\{0, 1\}$ & 0 & $4 \times n \times m \times q $ & $30\times$  & AND, bitcount\\
				\cline{2-8}
				& \textbf{TBN} & $\{-\beta,0, +\beta\}$ & $\{-\alpha, +\alpha\}$ & $n \times m$ & $3 \times n \times m \times q$ & $40\times$  & AND, XOR, bitcount\\
				
				\bottomrule \bottomrule 
		\end{tabular}}
		\scriptsize *We adopt DoReFa Network with 1-bit weight, 2-bit activation.
	\end{table}
	
	\vspace{-0.35cm}
	\subsubsection{Quantized Weights with Real-Value Inputs:}
	Networks with quantized weights can result in a direct reduction in network sizes, however, the efficiency improvement, which is achieved by avoiding the multiplication operation, is limited if the input signals remain real-valued. The most basic forms of weight quantization either directly constrain the weight values into the binary space $\{-1, 1\}$, \emph{e.g.,} BinaryConnect (BC \cite{BC}), or constrain the weight values along with a scaling factor $\{-\alpha, \alpha\}$, \emph{e.g.,} Binary-Weight-Networks (BWNs \cite{XNOR}). Beyond the binary weights, ternary weights are introduced to reduce the quantization error. Ternary Weight Networks (TWNs \cite{TWN}) quantize the weights into $\{-\alpha, 0, \alpha\}$, while Trained Ternary Quantization (TTQ \cite{TTQ}) achieves better performance by constraining the weights to asymmetric ternary values $\{-\alpha^n, 0, \alpha^p\}$. 
	
	\vspace{-0.35cm}
	\subsubsection{Quantized Weights and Inputs:}
	Compared to storage, the computational efficiency is a more critical demand for real-time predictions in resource-constrained environments. Since quantizing input signals can potentially replace arithmetical operations with XNOR and bitcount operations and improve the efficiency, networks that attempt to quantize both network weights and layer-wise input signals have been proposed. Expectation BackPropagation (EBP {\cite{EBP}}), Binarized Neural Networks (BNNs \cite{BNN}), Bitwise Neural Networks \cite{Kim2015BitwiseNN} and XNOR-Networks {\cite{XNOR}} have explored to brutally binarize input signals in addition to the binary weights. Targeting at lessening the quantization errors, high-order quantization methods, \emph{e.g.,} High-Order Residual Quantization (HORQ \cite{HORQ}), multi-bit quantization methods, \emph{e.g.,} DoReFa-Net \cite{DoReFa}, and ternary quantization methods, \emph{e.g.,} Gated XNOR Networks (GXNOR \cite{GXNOR}), have also been proposed.% and Ternary Neural Networks (TNN) \cite{TNN}. 
	
	The proposed TBN also falls in the type of networks that quantize both weights and inputs. Compared to the aforementioned works that aim to compensate for the effectiveness of binarized networks at the cost of degraded efficiency, TBN for the first time provides an elegant integration between the binary weights and ternary inputs, so as to provide an optimal tradeoff between memory, efficiency and performance. We illustrate comparisons of these measurements between TBN and the aforementioned methods in Table \ref{table:efficiency}.  
	
	
	There are other kinds of methods to compress and accelerate CNNs, \eg pretrained based methods, distillation and so on. 
	Fixed-point Factorized Networks (FFN \cite{FFN}) decomposed the weight matrix of pretrained models into two ternary matrices and a non-negative diagonal matrix so that both the computational complexity and the storage requirement of networks are reduced. 
	Ternary neural networks (TNNs \cite{TNN}) used teacher networks containing high-precision weights and ternary inputs, to teach student networks whose weights and inputs are both ternary-valued.
	LBCNN \cite{LocalBCNN} used pre-defined binary convolutional filters to reduce the number of learnable parameters.
	
	\section{Ternary-Binary Networks}
	%\section{Our Approach}
	In this section, we introduce our proposed TBN in detail. Firstly, we present some notations and show how to implement the convolutional operation by matrix multiplication. Secondly, we explain how to obtain the binary weights and ternary inputs by approximating their full-precision counterparts. Given the binary weights and ternary inputs, we further illustrate the multiplication between them. Finally, the whole training procedure of our TBN is elaborated.
	
	
	\subsection{Convolution with Matrix Multiplication} \label{MatrixMulti}
	A convolutional layer can be represented by a triplet $\langle \mathbf{I}, \mathcal{W}, * \rangle$. $\mathbf{I} \in \RR^{c \times h_{in} \times w_{in}}$ is a tensor that denotes the input of the convolutional layer, where $(c, h_{in}, w_{in})$ denote the \emph{channel}, \emph{height} and \emph{width} of the input $\mathbf{I}$, respectively. $\mathcal{W}$ is a set of $n$ tensors, where each element in this set $\mathbf{W} \in \mathbb{R}^{c \times h \times w}= \mathcal{W}_{i(i=1,\ldots,n)}$ is the $i^{th}$ weight filter of the convolutional layer, where $n$ is the number of weight filters, and ($h, w$) represent the filter size. $*$ represents the convolution operation with $\mathbf{I}$ and $\mathbf{W}$ as its operands. Let $\mathbf{C} \in \RR^{n \times h_{out} \times w_{out}}$ be the product of the convolution layer, where $h_{out} = (h_{in} + 2 \cdot p - h) / s + 1$ and $w_{out} = (w_{in} + 2 \cdot p - w) / s + 1$, and $(p,s)$ represent the pad and stride parameter respectively. 
	
	As adopted in the popular Caffe package {\cite{Caffe}}, we use matrix multiplication to implement the convolution layer $\langle \mathbf{I}, \mathcal{W}, * \rangle$. Specifically, by flattening each filter $\mathbf{W}$ to a row vector of shape $1 \times q~(q = c \times h \times w)$, the set of weight filters $\mathcal{W}$ can be reshaped to a matrix $\widetilde{\mathbf{W}} \in \RR^{n \times q}$. We use the function $\imTcol$ to represent this step, \ie $\widetilde{\mathbf{W}} = \imTcol(\mathcal{W})$. Similarly, transforming each sub-tensor in the input tensor $\mathbf{I}$ with the same size as the filter to a column vector, we get the matrix $\widetilde{\mathbf{I}} \in \mathbb{R}^{q \times m} (m = h_{out} \times w_{out})$ after accumulating these vectors, \ie $\widetilde{\mathbf{I}}=\imTcol(\mathbf{I})$. Let us denote the product of the matrix multiplication with $ \widetilde{\mathbf{I}} $ and $\widetilde{\mathbf{W}}$ as its operands by $\widetilde{\mathbf{C}} \in \mathbb{R}^{n \times m}$, \ie $ \widetilde{\mathbf{C}} = \widetilde{\mathbf{W}} \widetilde{\mathbf{I}}$. Finally, we reshape the matrix $\widetilde{\mathbf{C}}$ back to the output tensor $\mathbf{C}$. This step is the inverse operation of $\imTcol$, and we denote it by $\colTim$. This is the entire process of implementing a convolutional layer using matrix multiplication. We can use the following formula to summarize the whole process:
	\begin{equation}
	\label{eq:ten2mat}
	\mathbf{C} = \colTim(\widetilde{\mathbf{W}} \widetilde{\mathbf{I}}), 
	\widetilde{\mathbf{W}} = \imTcol(\mathcal{W}),
	\widetilde{\mathbf{I}} = \imTcol(\mathbf{I})
	\end{equation}
	It is noteworthy that fully connected layers can also be implemented by convolution, thus, in the rest of the paper, we refer to them also as convolutional layers \cite{XNOR}.
	
	
	\subsection{Binary Weights} \label{sec:binary}
	
	Following XNOR-Networks \cite{XNOR}, we adopt the similar paradigm to estimate the binary weights. Concretely, we use a binary filter $\mathbf{B} \in \{-1, +1\}^{c \times h \times w}$ and a scaling factor $\alpha \in \mathbb{R}^+$ to approximate a full-precision weight filter $\mathbf{W} \in \mathcal{W}$ such that $\mathbf{W} \approx \alpha \mathbf{B}$.
	The optimal approximation is obtained by solving the optimization problem of minimizing the $\ell_2$ distance between full-precision and binary weight filters, \ie $\displaystyle \alpha, \mathbf{B} = \argmin_{\alpha, \mathbf{B}} {\|\mathbf{W} - \alpha \mathbf{B} \|^2_2}$. The optimal solution is 
	\begin{equation}
	\label{eq_1}
	\mathbf{B} = \sign(\mathbf{W}),~
	\alpha = \frac{1}{c \times h \times w} \| \mathbf{W} \|_{1}.
	\end{equation}
	The binary weight filter $\mathbf{B}$ is obtained by taking the element-wise $\sign$ of weight values, and the scaling factor $\alpha$ is the average of absolute weight values. The binary weight filters obtained by this simple strategy can reduce the storage of a convolutional layer by $\sim 32\times$ compared to single-precision filters.
	
	\subsection{Ternary Inputs} \label{sec:ternary}
	In XNOR-Networks, the strategy to quantize the inputs of a convolutional layer to binary is quite complex. They take the sign of input values to get the binary input and calculate a matrix $\mathbf{A}$ by averaging the absolute values of elements in the input $\mathbf{I}$ across the channels, \ie, $\mathbf{A} = \frac{\sum{\|\mathbf{I}_{:,:,i}\|}}{c} $. Then the scaling matrix $\mathbf{K}$ for the input is obtained by convolving the matrix $\mathbf{A}$ with a kernel $\mathbf{k}\in \mathbb{R}^{h \times w}$ with $\mathbf{k}_{ij} = \frac{1}{h \times w}$. However, the scaling factors of the binary inputs do not affect the performance of XNOR-Networks. % After removing the 	scaling factor for the inputs reduce the accuracy by a small margin (less than 1 \% top-1 alexnet).
	Motivated by this, we abandon the scaling factors for the inputs to reduce unnecessary computation. On the other hand, TWN \cite{TWN} with ternary weights has better performance than BWN \cite{XNOR} with binary weights. In order to improve the performance of binarized CNNs, we quantize each element of the input tensor $\mathbf{I}$ into a ternary value $\{-1, 0, 1\}$ without the scaling factor. 
	
	We propose following threshold-based ternary function $\ternary$ to obtain ternary input tensor $\mathbf{T} \in \{-1, 0, +1\}^{c \times h_{in} \times w_{in}}$:
	\begin{equation} \label{eq:ternary}
	\mathbf{T}_{i} 
	= \ternary(\mathbf{I}_{i},\Delta) 
	= \left\{ \begin{array}{rl}
	+1, &\mathbf{I}_{i} > ~\Delta; \\
	0,  &|\mathbf{I}_{i}| \leq \Delta; \\
	-1, &\mathbf{I}_{i} < -\Delta; \\
	\end{array} \right.
	\end{equation}
	where $\Delta \in \mathbb{R}^+$ is a positive threshold parameter. The value of $\Delta$ controls the numbers of -1, 0 and 1 in $\mathbf{T}$, which will highly affect the final accuracy. When $\Delta$ is equal to 0, the function $\ternary$ degenerates to the sign function, so that the same performance as XNOR-Network will be obtained. However, when $\Delta$ is too big, each element in $\mathbf{T}$ will be zero according to Equation \eqref{eq:ternary}, and we will get the worst result. Thus, an appropriate value of $\Delta$ is necessary. We assume the best performance will be obtained when the numbers of -1, 0 and +1 in $\mathbf{T}$ are identical. Therefore, how to get the optimal $\Delta$ to balance the numbers of -1, 0 and +1 is critical.
	
	There is no straightforward approach to obtain the optimal $\Delta$. Though discrete optimization is a good method (since the states of $\mathbf{I}$ are finite), it can be time consuming. Instead, we make an assumption that $\mathbf{I}$ is distributed normally with zero mean and variance $\sigma^2$, \ie ${\displaystyle \mathbf{I} \sim {\mathcal {N}}(0 ,\sigma ^{2})}$. Thus the approximated $\Delta$ is $0.4\sigma$, which is equal to $0.4\mathrm{E}(|\mathbf{I}|)$, where $\mathrm{E}(|\mathbf{I}|)$ is the expectation of the absolute input values. So we calculate $\Delta$ using the following formula:
	\begin{equation}
	\label{eq_4}
	\Delta = 0.4\mathrm{E}(|\mathbf{I}|) \approx \frac{0.4}{c \times h_{in} \times w_{in}}\|\mathbf{I}\|_{1}
	\end{equation}
	The experiments show that our assumption is reasonable.
	By this means, it is fast and easy to quantize a real-valued input tensor into a ternary one.
	
	\subsection{Ternary-Binary Dot Product} \label{sec:tbdot}	
	Once we obtain the binary weights and ternary inputs, how to achieve effective ternary-binary multiplication is our next target. As we know, the matrix multiplication is based on the dot product. That is to say, the entry $C_{ij} \in \widetilde{\mathbf{C}} $ is the result of dot product between the $i^{th}$ row of weight matrix $\widetilde{\mathbf{W}}$ and the $j^{th}$ column of input matrix $\widetilde{\mathbf{I}}$. \Ie, $C_{ij} = \vec{\widetilde{W}_i} \cdot \vec{\widetilde{I}_j}$
	where $\cdot$ is the dot product, $\vec{\widetilde{W}_i} = [\widetilde{W}_{i1}, \ldots, \widetilde{W}_{iq} ] $  and $\vec{\widetilde{I}_j} = [\widetilde{I}_{1j}, \ldots, \widetilde{I}_{qj} ]$. 
	We can use binary operations to accelerate the dot product with a binary vector and a ternary vector as its operands. Let us use $\alpha \vec{b}$ to denote the binary filter $\mathbf{B}$ corresponding to $\vec{\widetilde{W}_i}$, where $\vec{\widetilde{W}_i} \approx \alpha \vec{b}, \vec{b} = \imTcol(\mathbf{B})\in \{-1,1\}^{q}$ and $\alpha$ is the scaling factor. Similarly, the ternary vector $\vec{t} \in \{-1, 0, +1\}^{q}$ corresponds to $\vec{\widetilde{I}_j}$. So we can implement this special dot product efficiently with the following formula:
	\begin{equation} \label{eq:tbn}
	C_{ij} =  \alpha (c_t - 2 \times \bitcount((\vec{b}~\XORop~\vec{t}')~\ANDop~\vec{t}'')),
	\end{equation}
	where we decompose the vector $\vec{t}$ into two vectors $\vec{t}' \in \{-1, 1\}^{q}$ and $\vec{t}'' \in \{0, 1\}^{q}$ as follows:
	\begin{equation} \label{eq:decompose}
	\vec{t}'_{i} = \left\{ \begin{matrix}
	1, & \vec{t}_{i} = 1 \\
	-1, &\mbox{otherwise}
	\end{matrix}
	\right.
	%\end{equation}
	%\begin{equation}
	,~
	\vec{t}''_{i} = \left\{ \begin{matrix}
	0, & \vec{t}_{i} = 0 \\
	1, &\mbox{otherwise}
	\end{matrix}
	\right.
	,~ i = 1, \ldots, q
	\end{equation}
	so that $t_i = t'_i \times t''_i$. $c_t = \bitcount(\vec{t}'') = \|\vec{t}\|_1$ is a constant which is independent of $\vec{b}$. In Equation \eqref{eq:tbn}, the operation $\bitcount$ returns the number of bits set to 1, and $\XORop, \ANDop$ are the logic operations. It should be noted that 1 in $\vec{b}, \vec{t}', \vec{t}''$ is considered to be logic true, and the others (\ie 0, -1) are regarded as logic false. In this way, we can implement the matrix multiplication efficiently.
	
	\renewcommand{\algorithmicrequire}{ \textbf{Input:}} %Use Input in the format of Algorithm  
	\renewcommand{\algorithmicensure}{ \textbf{Output:}} %Use Output in the format of Algorithm
	\begin{algorithm}[!t]  
		\caption{$\mathrm{TBConvolution}(\mathcal{W}, \mathbf{I})$}
		\label{algo:tb_conv}
		\begin{algorithmic}[1]
			\REQUIRE A set of weight filters $\mathcal{W}\in \mathbb{R}^{n \times c \times h \times w}$, the input tensor $\mathbf{I} \in \mathbb{R}^{c \times h_{in} \times w_{in} }$ and convolutional parameters including the stride $s$ and pad $p$
			\ENSURE The convolutional result $\mathbf{C} \in \mathbb{R}^{n \times h_{out} \times w_{out}}$
			\FOR {$i^{th}$ filter $\mathbf{W}$ in $\mathcal{W}$ }
			\STATE $\alpha = \frac{1}{c \times h \times w}\|\mathbf{W}\|_{1}$ // \texttt{calculate the scaling factor w.r.t. Eq. (\ref{eq_1})}
			\STATE $\mathbf{B} = \sign(\mathbf{W})$ // \texttt{get the binary filter w.r.t. Eq. (\ref{eq_1})}
			\STATE $\mathbf{W} \approx \alpha \mathbf{B} $
			\ENDFOR
			\STATE $\Delta = \frac{0.4}{c \times h_{in} \times w_{in}} \|\mathbf{I}\|_{1}$ // \texttt{calculate the threshold parameter w.r.t. Eq. (\ref{eq_4})}
			\STATE $\mathbf{T} = \ternary(\mathbf{I}, \Delta)$ // \texttt{get the ternary input w.r.t. Eq. (\ref{eq:ternary})}
			\STATE $\widetilde{\mathbf{W}} = \imTcol(\mathcal{W}) $ and $\widetilde{\mathbf{I}} = \imTcol(\mathbf{T})$ // \texttt{convert weights and input tensors to matrices w.r.t. Eq. (\ref{eq:ten2mat})}
			\STATE $\widetilde{\mathbf{C}} = \widetilde{\mathbf{W}}\widetilde{\mathbf{I}}$ // \texttt{accelerate matrix multiplication w.r.t. Eq. (\ref{eq:tbn})}
			\STATE $ \mathbf{C} = \colTim(\widetilde{\mathbf{C}})$ // \texttt{convert the product to the tensor w.r.t. Eq. (\ref{eq:ten2mat})}
		\end{algorithmic}  
	\end{algorithm}
	
	\subsection{Training TBN} \label{sec:training_tbn} 
	
	With the above strategies, we can get a very fast convolutional layer with ternary inputs and binary weights (TBConvolution), and Algorithm \ref{algo:tb_conv} demonstrates how TBConvolution works. 
	In the TBN, there is a batch normalization layer {\cite{BatchNormal}} to normalize the inputs before each TBConvolution, so that the number of -1, 0, 1 for ternary inputs is more balanced. 
	A non-linear activation layer (\eg ReLU) after each TBConvolution is optional because ternary quantization can play the role of the non-linear activation function. 
	Other layers (\eg pooling and dropout) can be inserted after TBConvolution (or before the batch normalization layer).
	To train TBN, the full-precision gradient is adopted and we use straight-through estimator to compute the gradient of binary and ternary quantization function, \ie
	\begin{equation} \label{eq:grad}
	\frac{\partial \sign}{\partial r} = \frac{\partial \ternary}{\partial r} = \vec{1}_{|r| < 1} = \left\{
	\begin{array}{ll}
	1, & |r| < 1 \\
	0, & \mbox{otherwise}
	\end{array} \right.
	\end{equation}
	Similar to the strategy used in BNN \cite{BNN}, XNOR-Networks \cite{XNOR} and HORQ \cite{HORQ}, we do not apply our approach on the first or last layer.
	Algorithm \ref{algo:Training} demonstrates the procedure for training an $L$-layer Ternary-Binary Network. We can use any optimizer (\eg 
	%stochastic gradient descent (SGD) with momentum, 
	ADAM \cite{Adam}) to train TBNs.
	
	% Additionally, it is important to put very little or no weight decay (L2 regularization) when training the Ternary-Binary Networks, since the quantization process plays the role of regularization. 
	
	\begin{algorithm}[t]  
		\caption{Training an $L$-layer TBN}
		\label{algo:Training}
		\begin{algorithmic}[1]  
			\REQUIRE 
			A minibatch of inputs and targets $(\mathbf{X}_0, \mathbf{Y})$, cost function $C(\mathbf{Y}, \hat{\mathbf{Y}})$, current weights $\widehat{\mathcal{W}}(t) = \{\mathcal{W}_l(t)\}_{l=1}^L$,  and current learning rate $\eta(t)$
			\ENSURE 
			updated weight $\widehat{\mathcal{W}}(t+1)$ and updated learning rate $\eta(t+1)$
			
			% \STATE $\mathbf{X}_1 = \mathrm{Convolution}(\vec{X}_0)$
			\FOR{$l=1$ to $L$}
			\STATE $\mathbf{I}_l = \mathrm{BatchNormalization}(\mathbf{X}_{l-1})$
			\STATE $\mathbf{X}_l = \mathrm{TBConvolution}(\mathcal{W}_l(t), \mathbf{I}_l) $
			\ENDFOR
			\STATE $\frac{\partial C}{\partial \mathcal{W}} = \mathrm{Backward}(\frac{\partial C}{\partial \mathbf{X}_L}, \widehat{\mathcal{W}}(t))$ // \texttt{standard backward propagation while Equation \eqref{eq:grad} is applied}
			\STATE $\widehat{\mathcal{W}}(t+1) = \mathrm{UpdateParameters}(\widehat{\mathcal{W}}(t), \frac{\partial C}{\partial \mathcal{W}}, \eta(t))$ // \texttt{Any optimizer (\eg ADAM)}
			\STATE $\eta(t+1) = \mathrm{UpdateLearningRate}(\eta(t), t)$ // \texttt{Any learning rate scheduling function}
		\end{algorithmic}  
		\textbf{Note:} $\mathcal{W}_l$ here is identical to the $l^{th}$ layer TBN weights $\mathcal{W}$ (mentioned in Section \ref{MatrixMulti})
	\end{algorithm}  
	
	\section{Experiments}
	
	As the proposed TBN uses ternary inputs with binary weights to reduce the approximation error caused by quantization while maintaining reasonable performance to some extent, the goal of our experiments is mainly to answer the following three research questions:
	\begin{itemize}
		\item \textbf{Q1:} How does TBN perform compared to other quantized/squeezed deep networks (\ie, XNOR-Networks, HORQ)  in different tasks (\ie, image recognition and object detection)?
		\item \textbf{Q2:} How fast can TBN accelerate compared to other quantized networks?
		\item \textbf{Q3:} How is the performance of TBN influenced by different components (\eg the sparsity of ternary inputs, the usage of activation functions)?
	\end{itemize}
	
	\begin{table}[t]
		\centering
		\caption{The classification accuracies of different CNNs trained with various models on the four datasets. Both ``top-1/top-5'' accuracies are presented for the ImageNet dataset. ``-'' indicates that the results are not provided in their original papers.}
		\label{table:results}
		\resizebox{\textwidth}{!}{
			\begin{tabular}{c|ccccccc}
				\toprule \toprule
				\multicolumn{2}{c}{Dataset} & MNIST   & CIFAR-10 & SVHN  & ImageNet & ImageNet & ImageNet\\
				\multicolumn{2}{c}{Models}  & LeNet-5 & VGG-7    & VGG-7 & AlexNet  & ResNet-18 & ResNet-34\\
				\midrule
				\midrule
				
				& Full-precision & 99.48 & 92.88 & 97.68 & 57.2/80.2 & 69.3/89.2 & 73.3/91.4\\
				
				\midrule
				\multirow{4}{*}{\rotatebox[]{90}{\tabincell{c}{Quantize\\Weights} }}
				
				& BC \cite{BC}         & 98.82  & 91.73  & 97.85  & 35.5/61.0 & \NA       & \NA\\
				& BWN \cite{XNOR}      & 99.38  & 92.58  & 97.46  & 56.8/79.4 & 60.8/83.0 & \NA\\
				& TWN \cite{TWN}	   & 99.38  & 92.56  & \NA    & 54.5/76.8 & 65.3/86.2 & \NA \\
				& TTQ \cite{TTQ}	   & \NA    & \NA    & \NA    & 57.5/79.7 & 66.6/87.2 & \NA \\
				
				\midrule
				\multirow{4}{*}{\rotatebox[]{90}{\tabincell{c}{Other\\Methods}}}
				& FFN \cite{FFN}     	    & \NA   & \NA   & \NA   & 55.5/79.0 & \NA        & \NA \\
				& LCNN-fast \cite{LCNN}     & \NA   & \NA   & \NA   & 44.3/68.7 & 51.8/76.8  & \NA \\
				& LCNN-accurate \cite{LCNN} & \NA   & \NA   & \NA   & 55.1/78.1 & 62.2/84.6  & \NA \\
				& LBCNN \cite{LocalBCNN}    & 99.51 & 92.66 & 94.50 & 54.9/\NA  & \NA        & \NA  \\
				
				\midrule
				\multirow{8}{*}{\rotatebox[]{90}{\tabincell{c}{Quantize Inputs\\ and Weights}}}
				
				& TNN {\cite{TNN}} & 98.33 & 87.89 & 97.27 & \NA & \NA & \NA  \\
				& GXNOR {\cite{GXNOR}}    & 99.32 & 92.50 & 97.37 & \NA & \NA & \NA  \\
				& BNN \cite{BNN} 	      & 98.60 & 89.85 & 97.47 & 27.9/50.42 & \NA  & \NA \\
				& DoReFa-Net$^{*}$ {\cite{DoReFa}}  & \NA & \NA & 97.6 & 47.7/\NA & \NA & \NA  \\
				& BinaryNet \cite{BinaryNet} & \NA & \NA & \NA & 46.6/71.1 & \NA & \NA  \\
				& HORQ \cite{HORQ} & 99.38 & 91.18& 97.41 & \NA  & 55.9/78.9  & \NA \\
				& XNOR-Network \cite{XNOR} & 99.21 & 90.02 & 96.96 & 44.2/69.2 & 51.2/73.2 & 55.9/79.1\\
				\cline{2-8}
				& \textbf{TBN}   &99.38 & 90.85 & 97.27 & 49.7/74.2 &  55.6/79.0 & 58.2/81.0\\
				\bottomrule \bottomrule 
		\end{tabular}}
		\scriptsize *We adopt DoReFa-Net with 1-bit weight, 2-bit activation and 32-bit gradient for fair comparison.
	\end{table}
	
	\vspace{-3ex}
	\subsection{Image Classification}
	\noindent\textbf{Datasets:}
	We evaluate the performance of our proposed approach on four different datasets, \ie MNIST, CIFAR-10, SVHN, ImageNet (ILSVRC2012), and compare it with other methods. MNIST \cite{MNIST} is an image classification benchmark dataset of hand-written digits from 0 to 9, and it consists of a training set of 60,000 images and a testing set of 10,000 $32 \times 32$ gray-scale images. CIFAR-10 \cite{CIFAR10} is also an image classification benchmark dataset, which consists of 50,000 training images and 10,000 test images, where each instance is a $32 \times 32$ color image across 10 categories. SVHN (Street View House Numbers \cite{SVHN}) is a dataset consisting of $32 \times 32$ color house number digits cropped from street view images. SVHN contains 604,000 images (with about 531,000 difficult samples to be used as extra) for training and 26,000 examples for testing. The large-scale ImageNet classification dataset (ILSVRC 2012) \cite{ImageNet} contains over 1.28 million training images from 1,000 classes, and 50,000 images in the validation set. As previously mentioned, our TBN can accommodate any network architecture. Hence, we perform the following evaluations with different networks on the above datasets. Note that we adopt the Adam optimizer with Batch Normalization to speed up the training, and ReLU is adopted as the activation function in all the following experiments. In addition, all the deep networks used in this section are trained from scratch. 
	
	\noindent\textbf{Results on MNIST with LeNet-5:}
	The LeNet-5 \cite{MNIST} architecture we used is ``32-C5 + MP2 + 64-C5 + MP2 + 512-FC + 10-FC + SVM". It is composed of two convolutional layers with size $5 \times 5$, a fully connected layer and an SVM classifier with 10 labels. Specifically, no pre-processing, data augmentation or pre-training is used, in order to keep the task challenging.  The learning rate starts at 0.001 and is divided by 10 at epoch 15, 30, 45 with the mini-batch size 200. We report the best accuracy on the testing set. From the results shown in Table \ref{table:results}, we observe that our TBN has the same performance as HORQ but outperforms XNOR-Network by 0.17\%. In fact, on the MNIST dataset, the differences between those methods are subtle (less than 1\%).
	
	\noindent\textbf{Results on CIFAR-10 with VGG-7:} \label{vgg7}
	To train the networks on CIFAR-10 dataset, we follow the same data augmentation scheme in ResNet {\cite{ResNet}}. In detail, we use the VGG inspired architecture, denoted as VGG-7, by: 
	``2$\times$(128-C3)+MP2+2$\times$(256-C3)+MP2+2$\times$(512-C3)+MP2+1024-FC+10-FC+Softmax", 
	where C3 is a $3 \times 3$ convolutional block, MP2 is a max-pooling layer with kernel size 2 and stride 2, and Softmax is a softmax loss layer. We train this model for 200 epochs with a mini-batch of 200. The learning rate also starts at 0.001 and is scaled by 0.5 every 50 epochs. The results are given in Table \ref{table:results}. The accuracy of our TBN on CIFAR-10 is higher than that of XNOR-Network and BNN. However, compared with HORQ and GXNOR, the performance of TBN is slightly worse, since heavier quantization of both inputs and weights is adopted in our method.
	
	\noindent\textbf{Results on SVHN with VGG-7:}
	We also use VGG-7 networks for SVHN. Because SVHN is a much larger dataset than CIFAR-10, we only train VGG-7 for 60 epochs. From the results presented in Table \ref{table:results}, it is easily observed that the performances of TBN, HORQ, XNOR-Networks, BNN, GXNOR and TNN are almost at the same level.
	
	\noindent\textbf{Results on ImageNet with AlexNet:}
	In this experiment, we report our classification performance in terms of top-1 and top-5 accuracies using AlexNet. Specifically, AlexNet is with 5 convolutional layers and two fully-connected layers.
	We train the network for 100 epochs. The learning rate starts at 0.001 and is scaled by 0.1 every 25 epochs. Figures \ref{fig:result}(a) and \ref{fig:result}(b) demonstrate the classification accuracy for training and inference along with the training epochs. The solid lines represent training and validation accuracy of TBN, and dashed lines show the accuracy of XNOR-Network.  The final accuracy of AlexNet is shown in Table \ref{table:results}, which illustrates that our TBN outperforms XNOR-Network by a large margin (5.5\% on top-1 accuracy and 5.0\% on top-5 accuracy).
	\begin{figure} [t]
		\centering
		\includegraphics[width=0.32\textwidth]{pics/AlexNet_top1.eps}
		\includegraphics[width=0.32\textwidth]{pics/AlexNet_top5.eps}
		\includegraphics[width=0.32\textwidth]{pics/ResNet18_top1.eps}
		\includegraphics[width=0.32\textwidth]{pics/ResNet18_top5.eps}
		\includegraphics[width=0.32\textwidth]{pics/ResNet34_top1.eps}
		\includegraphics[width=0.32\textwidth]{pics/ResNet34_top5.eps}
		\vspace{-2ex}
		\caption{
			(a) and (b) compare the top-1 and top-5 accuracies between TBN and XNOR-Networks on AlexNet; (c) and (d) compare the top-1 and top-5 accuracies between TBN, HORQ and XNOR-Networks on ResNet-18; (e) and (f) compare the top-1 and top-5 accuracies between TBN and XNOR-Networks on ResNet-34.}
		\label{fig:result}
		\vspace{0.3cm}
	\end{figure}
	
	\noindent\textbf{Results on ImageNet with ResNet:}
	In addition to the AlexNet, we also train two Ternary-Binary Networks for both ResNet-18 and ResNet-34 {\cite{ResNet}} architectures on the ImageNet dataset.
	% \footnote{Our implementation of ResNet-18 and ResNet-34 model are based on \url{https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py}
	We run the training algorithm for 60 epochs with a mini-batch size of 128. The learning rate starts at 0.001 and is scaled by 0.1 every 20 epochs. ResNet-34 adopts the same training strategy, but is only trained with 30 epochs in total and the learning rate is decayed every 10 epochs. Figures \ref{fig:result}(c) and (d), and Figures \ref{fig:result}(e) and (f) demonstrate the classification accuracies (top-1 and top-5) of ResNet-18 and ResNet-34 respectively, along with the epochs for training and inference. The final results are reported in Table \ref{table:results}, which show that Ternary-Binary Network is better than XNOR-Networks (ResNet-18: by 4.4\%/4.8\% on top-1/top-5, ResNet-34: by 2.3\%/1.9\% on top-1/top-5). 
	Meanwhile, the performance of our TBN is competitive to that of HORQ (top-1: 55.6\% vs. 55.9\%; top-5: 79.0\% vs. 78.9\% on ResNet-18).
	
	
	
	\subsection{Object Detection}
	We also evaluate the performance of TBN on the object detection task. Various modified network architectures are used, including Faster-RCNN \cite{ren2015faster}, and Single Shot Detector (SSD \cite{liu2016ssd}). We change the base network of these architectures to the TBN with ResNet-34. We compare the performance with XNOR-Networks and full-precision networks. We evaluate all methods on the PASCAL VOC dataset \cite{pascal-voc-2007,pascal-voc-2012}, which is a standard recognition benchmark with detection and semantic segmentation challenges. We train our models on the combination of VOC2007 \texttt{trainval} and VOC2012 \texttt{trainval} (16,551 images) and test on VOC2007 \texttt{test} (4,952 images).
	
	To modify the base network of SSD, we replace the truncated VGG-16 and conv8\_1, conv8\_2 by ResNet-34 where the global average pooling layer and its subsequent layers are removed. We use the last layers of stage 3, 4, 5 in ResNet-34 instead of conv4\_3, conv7(fc7), conv8\_2 in the original SSD to predict both locations and confidences, and the $\ell_2$ normalization technique is applied on these layers. The remaining settings are the same as in the original SSD.
	
	The comparison results for object detection are illustrated in Table \ref{table:detection}. As can be seen from the table, the models based on ResNet-34 can achieve better performance both on Faster R-CNN and SSD. Our TBN with ResNet achieves up to 4.4\% higher than XNOR-Networks in terms of mAP, although there is a large margin compared to full-precision networks.
	
	\begin{table} [t]
		\begin{center}
			\caption{The performance (in mAP) comparison of TBN, XNOR-Networks and full-precision CNN models for object detection. All methods are trained on the combination of VOC2007 and VOC2012 \texttt{trainval} sets and tested on the VOC2007 \texttt{test} set.}
			\label{table:detection}
			\begin{tabular}{cccc|c}
				\toprule
				method & full-precision & ~full-precision & ~XNOR-Networks &  ~\textbf{TBN} \\
				base network & VGG-16 & ResNet-34 & ResNet-34 & \textbf{ResNet-34}\\
				\midrule
				Faster R-CNN &  73.2  & 75.6& 54.7& 59.0 \\
				SSD 300 & 74.3  & 75.5  & 55.1& 59.5 \\
				\bottomrule
			\end{tabular}
		\end{center}
		\vspace{-1ex}
	\end{table}
	
	\subsection{Efficiency}\label{sec:efficiency}
	In this section, we will illustrate the efficiency comparison between different methods. Suppose matrix multiplication $\widetilde{\mathbf{C}} = \widetilde{\mathbf{W}} \widetilde{\mathbf{I}}$, where $\widetilde{\mathbf{W}} \in \RR^{n \times q}$, $\widetilde{\mathbf{I}} \in \RR^{q \times m}$ and $\widetilde{\mathbf{C}} \in \RR^{n \times m}$. 
	To calculate $\widetilde{\mathbf{C}}$, there are $n \times m \times q$ multiply-accumulate operations (MACs) required. 
	% more detail about MAC: https://en.wikipedia.org/wiki/Multiply%E2%80%93accumulate_operation
	If the matrix $\widetilde{\mathbf{I}}$ is quantized to ternary values and the matrix $\widetilde{\mathbf{W}}$ is binary-valued, matrix multiplication requires $n \times m$ multiplications and $n \times m \times q$ $\ANDop$, $\XORop$ and $\bitcount$ operations respectively, according to Equation \eqref{eq:tbn}. We provide a comparison between our approach and the related works using quantized inputs and weights in Table \ref{table:efficiency}.  Compared with XNOR-Networks, our approach adds $n \times m \times q$ binary operations and saves $n \times m$ MACs, while HORQ needs twice the number of MACs and binary operations as XNOR-Networks.
	
	The general computation platform (\ie, CPU, GPU, ARM) can perform an $L$-bit binary operation in one clock cycle ($L = 64$ typically\footnote{\scriptsize An Intel SSE (AVX, AVX-512) instruction can perform a 128-bit (256-bit, 512-bit) AND / XOR operation.}). Assume the ratio between the speed of performing an $L$-bit binary operation and a multiply-accumulate operation is $\gamma$, \ie 
	\begin{equation}
	\gamma = \frac{\mbox{average time required by MAC}}
	{\mbox{average time required by } L \mbox{-bits binary operation}}
	\end{equation}
	Therefore, the speedup ratio of Ternary-Binary Networks is: % can be computed as:
	\begin{equation} \label{speed_tb}
	S 
	= \frac{\gamma  n m q}{\gamma n m + 3 n m \lceil \frac{q}{L} \rceil} 
	= \frac{\gamma q}{\gamma + 3 \lceil \frac{q}{L} \rceil}
	\end{equation} 
	It shows that the speedup ratio depends on $q$ and $L$, while $\gamma$ is determined by the machine. 
	For a convolutional layer, $q = c \times h \times w$, that is to say, $S$ is independent of the input size. 
	According to the speedup ratio achieved by XNOR-Network, we can safely assume $\gamma = 1.91$. To maximize the speedup, $q$ should be several times of $L$. In Figure \ref{fig:speedup}, we illustrate the relationship between the speedup ratio and $q$, $L$. It shows that we can obtain higher speedup ratio by increasing $q$ or $L$.
	
	Table \ref{table:efficiency} compares the speedup ratio achieved by different methods, in which parameters are fixed as: $\gamma=1.91, L = 64$ and $q = c \times h \times w = 2304$\footnote{\scriptsize For the majority of convolutional layers in the ResNet{\cite{ResNet}} architecture, the kernel size is $3 \times 3$ and the input channel size is 256, so we fix $q = 256 \times 3^2 = 2304$.}. When real-valued inputs are used with either binary or ternary weights, the MAC operation can be replaced by only additions and subtractions, achieving a $\sim 2\times$ speedup \cite{XNOR}. In contrast, the methods in which both weights and inputs are quantized achieve a high speedup ($\ge 15\times$) by using binary operations. Specifically, the more bits used by weights or inputs, the lower the speedup ratio but also the lower the approximation error. Using our approach, we gain a $40\times$ theoretical speedup ratio, which is $11\times$ higher than HORQ. More details can also be observed in the \textbf{Supplementary Material}.
	
	\textbf{Remark. Why not use ternary weights with binary inputs:} Actually, a ternary (2-bit) weight network with binary inputs uses the same scheme as TBN to accelerate CNNs, but requires twice as much storage space as TBN. Since TBN has a higher compression rate, we choose the TBN from these two equivalent approaches.
	
	
	\begin{figure}[t]
		\centering
		\subfigure[]{
			\label{fig:speedup}
			\includegraphics[width=0.3\linewidth]{pics/speedup.eps}
		}
		\subfigure[]{
			\label{fig:delta}
			\includegraphics[width=0.3\linewidth]{pics/sparsity.eps}
		}
		\subfigure[]{
			\label{fig:delta_conv3}
			\includegraphics[width=0.3\linewidth]{pics/delta_conv3.eps}
		}
		\caption{(a) The relationship between speedup ratio and $q$ under different $L$; (b) The classification accuracy with varying $\Delta = t \times \mathrm{E}(|\mathbf{I}|)$, \ie sparsity of ternary inputs. The percentage stacked histogram shows the percentage of the average number of -1, 0, 1 w.r.t. the inputs of the second convolutional layer; (c) The classification accuracy and percentage stacked histogram w.r.t. the inputs of the third convolutional layer.}
		\label{fig:speedup_delta}
		\vspace{0.5ex}
	\end{figure}
	
	\subsection{Analysis of TBN Components}
	
	\noindent\textbf{Sparsity of Ternary Inputs:} %\label{determine_Delta}
	To explore the relationship between sparsity and accuracy, we set $\Delta = t \times \mathrm{E}(|\mathbf{I}|)$ and train a \textbf{Simple Network} structure on CIFAR-10: ``32-C5 + MP3 + 32-C5 + MP3 + 64-C5 + AP3 + 10-FC + Softmax". We adopt this kind of structure because of its simplicity and flexibility for the performance comparison. The learning strategy is the same as VGG-7 in Section \ref{vgg7}. The classification accuracies with different degrees of sparsity are shown in Figure \ref{fig:delta} and \ref{fig:delta_conv3}. As can be seen from the two figures, when $t$ grows from 0 (which is the case of XNOR-Networks) to 0.4, both the number of zeros and accuracy increase accordingly. However, when $t$ further increases, the model capacity is reduced and the error rate is increased quickly. Therefore, we choose $t=0.4$ in our experiments, which is in line with our optimization procedure in Section \ref{sec:ternary}. 
	% When $t = 0.4$, the percentage of -1 for the inputs of first TB-Convolution is near 0.5, which is not balanced. We think this is caused by ReLU nonlinear activation layer. There is a similar behavior in the second TB-Convolution
	
	\begin{table}[t]
		\caption{(a) The classification performance of TBN while using non-linear layers with different activation functions (on the CIFAR-10 dataset). ``None" denotes that we don't use non-linear layer; (b) The comparison of accuracies (\%) on CIFAR-10 after quantizing the inputs of first/last convolutional layers. ~\cmark~ indicates that we quantize the first/last layers, and ~\xmark~ indicates that we use full-precision inputs and weights.}
		\vspace{-0.3cm}
		\subtable[]{
			\centering
			\label{table:nonlinear}
			\resizebox{0.45\textwidth}{!}{
				\begin{tabular}{c|cccc}
					\toprule
					\multirow{2}{*}{Base Networks} & \multicolumn{4}{c}{Non-Linear Layers}\\
					\cline{2-5}
					& None & ReLU & Sigmoid & PReLU \\
					\midrule
					ResNet-20 & 81.36  & 82.15 & 79.12 & 84.34 \\
					VGG-7     & 89.49 & 90.85 & 89.78 & 90.10 \\
					Simple Network    & 75.92 & 81.21 & 78.67 & 81.14 \\
					\bottomrule
				\end{tabular}
			}
		}
		\subtable[]{	
			\centering
			\label{table:first_last_layer}
			\resizebox{0.45\textwidth}{!}{
				\begin{tabular}{cc|cccc}
					\toprule
					%\multicolumn{2}{c|}{Quantized?} & \multicolumn{4}{|c}{Accuracy(\%)} \\
					%\cline{3-6}
					First& Last & XNOR & TBN & BWN & HORQ\\
					\hline
					\xmark & \xmark & 79.11 & 81.21 & 82.66 & 81.12\\
					\xmark & \cmark	& 71.64 & 76.88 & 81.86 & 76.64\\
					\cmark & \xmark & 62.85 & 65.57 & 76.61 & 68.69\\
					\cmark & \cmark & 52.41 & 58.86 & 73.66 & 62.55\\
					\hline
					\multicolumn{2}{c|}{Full-Precision} & \multicolumn{4}{c}{85.51} \\
					\bottomrule
				\end{tabular}
			}
		}
		\vspace{-0.3cm}
	\end{table}
	
	\noindent\textbf{Effect of Activation Function:}
	Here, we explore the influence of different activation functions on our TBN framework. Specifically, we incorporate three non-linear activation functions, \ie ReLU, Sigmoid and PReLU. `Simple Network' in the table indicates the simple base network architecture used in the above paragraph. As shown in Table \ref{table:nonlinear}, the accuracy can be improved when using the non-linear activation functions, and using PReLU could achieve the best performance. However, the improvement is subtle, mainly because the ternary quantization function in our TBN already plays the role of activation function.
	
	\noindent\textbf{Quantizing the First/Last Layer?}
	As shown in our framework, we avoid the quantization step on the first and last layers of the networks. The reasons are two-fold: Firstly, the inputs of the first layer have much fewer channels (\ie $c = 3$), thus the speedup ratio in efficiency is not considerably high. Secondly, if the inputs of the first or last layer are quantized, the performance will drop significantly, which can be seen from Table \ref{table:first_last_layer}. Note that all the results here are obtained based on the previously mentioned Simple Network. As we can see from the table, the accuracies of the four networks decrease consistently by a large margin after quantizing their first/last layers, and the performance drop is especially obvious when the first layer is quantized.
	
	
	
	\section{Conclusion}
	\vspace{-2ex}
	In this paper, we for the first time incorporated binary network weights and ternary layer-wise inputs as a lightweight approximation to standard CNNs. We claim that the ternary inputs along with the binary weights can provide an optimal tradeoff between memory, efficiency and performance. An accelerated ternary-binary matrix multiplication that employs highly efficient \textbf{XOR}, \textbf{AND} and \textbf{bitcount} operations was introduced in TBN, which achieved $\sim 32\times$ memory saving and $40\times$ speedup over its full-precision CNN counterparts. TBN demonstrated its consistent effectiveness when applied to various CNN architectures on multiple datasets of different scales, and it also outperformed the state-of-the-art XNOR-Network by up to 5.5\% (top-1 accuracy) on the ImageNet classification task, and up to 4.4\% (mAP score) on the PASCAL VOC object detection task.
	
	\clearpage
	
	\bibliographystyle{splncs}
	\bibliography{egbib}
	
	% 	\clearpage
	% 	\section{Supplementary}
	% 	Although the main paper stands on its own, it is still
	% 	worthwhile showing more model details and experimental
	% 	results. In this supplementary document, we provide:
	% 	\begin{itemize}
	% 		\item Attention model details
	% 		\item More experiments on CIFAR-10 
	% 		\item How to further accelerate TBN? 
	% 	\end{itemize}
	% 	\subsection{Attention Model Details}
	
	%  \begin{table}
	% 	\centering
	% 	\caption{This table shows how to compute the dot product whose operands' length is 1 according to Equation \eqref{eq:tbn}.}
	% 	\begin{tabular}{|c|c|cc|c|c|c|c|c|}
	% 		\hline
	%         \multicolumn{9}{|c|}{$\alpha \vec{b} \cdot \vec{t} = \alpha(c_t - 2 \bitcount((\vec{b}~\XORop~\vec{t}')~\ANDop~\vec{t}'')), \alpha=1$} \\
	%         \hline
	% 		$\vec{b}$ & $\vec{t}$ & $\vec{t}'$ & $\vec{t}''$ & $c_t$ & 
	% 		\tabincell{c}{$x_1 =$ \\ $\vec{b}~\XORop~\vec{t}'$} & 
	% 		\tabincell{c}{$x_2 =$ \\ $x_1~\ANDop~\vec{t}''$} & 
	% 		$c_t - 2\bitcount(x_2)$ & $\vec{b} \cdot \vec{t}$\\
	% 		\hline
	% 		\multirow{3}{*}{1}  
	% 		& 1  & 1  & 1 & 1 & T~$\XORop$~T=F & F~$\ANDop$~T=F & 1 - 0 = 1  & $= 1 \times 1  $ \\
	% 		& 0  & -1 & 0 & 0 & T~$\XORop$~F=T & T~$\ANDop$~F=F & 0 - 0 = 0  & $= 1 \times 0  $ \\
	%         & -1 & -1 & 1 & 1 & T~$\XORop$~F=T & T~$\ANDop$~T=T & 1 - 2 = -1 & $= 1 \times -1 $ \\
	% 		\hline
	% 		\multirow{3}{*}{-1} 
	% 		& 1  & 1  & 1 & 1 & F~$\XORop$~T=T & T~$\ANDop$~T=T & 1 - 2 = -1 & $= -1 \times 1 $ \\
	% 		& 0  & -1 & 0 & 0 & F~$\XORop$~F=F & F~$\ANDop$~F=F & 0 - 0 = 0  & $= -1 \times 0 $ \\
	% 		& -1 & -1 & 1 & 1 & F~$\XORop$~F=F & F~$\ANDop$~T=F & 1 - 0 = 1  & $= -1 \times -1$ \\
	% 		\hline
	% 	\end{tabular}
	% \end{table}
	
	% 	\subsection{More Experiments on CIFAR-10}
	%  	We evaluate our proposed method on more architectures 
	%  	\footnote{Our implementation is followed by \url{https://github.com/kuangliu/pytorch-cifar/tree/master/models}}
	%  	, and compare the performance with full-precision networks, XNOR-Networks and HORQ networks. The train configurations are same as VGG-7 in \ref{vgg7}. We report the accuracy associated with the best validation epoch.
	
	%  \begin{table}
	%      \begin{center}
	%      \caption{This table compares the results of several models on CIFAR-10 dataset among full-precision networks, Ternary-Binary Networks and XNOR-Networks.}
	%      \label{table:cifar10}
	%      \begin{tabular}{ccccc}
	%      \toprule
	%      architectures                                & full-precision  & XNOR & HORQ & \textbf{TBN}    \\
	%      \midrule
	%      VGG-19 \cite{VGG}                            & 92.23 & 84.78 & & \textbf{89.52}  \\ 
	%      ResNet-18  \cite{ResNet}                     & 93.37 & 90.25 & & \textbf{91.05}  \\ 
	%      ResNeXt-29(2x64d) \cite{xie2016aggregated}   & 92.26 & 85.93 & & \textbf{89.06}  \\ 
	%      DenseNet-121 \cite{huang2016densely}         & 93.06 & 90.01 & & \textbf{91.65}  \\ 
	%      % PreActResNet-18\cite{he2016identity}         & 92.83 & 82.66 & & \textbf{89.81}  \\ 
	%      \bottomrule
	%      \end{tabular}
	%      \end{center}
	%  \end{table}
	
	% 	Table {\ref{table:cifar10}} compares the classification accuracy of Ternary-Binary Networks with baselines on various architectures. In all architectures, our method is better than XNOR-Network.% although it is worse than full-precision networks. 
	% 	\subsection{Further Accelerate TBN}
	%  	In this section we discuss how to get faster and/or more accurate CNNs based on Ternary-Binary networks.
	
	% 	 \subsubsection{Hardware Design}
	% 	 	As we mentioned earlier, TBN can achieve high speedup and compression ratios on the general computation platform such on CPU, GPU, ARM. 
	% 	 	But we can design our own hardware architecture on the FPGA or ASIC board to obtain higher speedup ratio.
	% 	 	There are two strategies.
	% 	 	The first method is to design the hardware to perform more bits (\eg 1024, 2048) binary operations in a single clock cycle.
	% 	 	And another way is to design the hardware can execute three different binary operations $\ANDop$, $\XORop$ and $\bitcount$ simultaneously in a single clock cycle. 
	% 	 	Maybe, on our own hardware, we can execute the dot product in a one clock cycle. If so, the complexity of Ternary-Binary matrix multiplication can be thought of as $O(n^2)$.
	% 	 	In addition, XNOR-Networks and HORQ can accelerate CNNs by designing hardware using similar strategies.
	
	% 	 \subsubsection{Combine with Other Efficient Structure-based Networks}
	
	%  	Our proposed approach is based on fixed-point quantization. 
	%  	But there is another kind of methods to accelerate and compress networks by using the more efficient network architectures.
	%  	For example, by using $1 \times 1$ convolution layer, SqueezeNet {\cite{SqueezeNet}} achieved AlexNet-level accuracy on ImageNet with 50x fewer parameters. 
	%  	For mobile and embedded vision applications, Andrew G. Howard Menglong \etal {\cite{MobileNet}} proposed a class of efficient models called MobileNets.
	%  	MobileNets build lightweight deep neural networks by using depthwise separable convolutions. 
	%  	ShuffleNet {\cite{ShuffleNet}} is an extremely computation efficient CNN architecture. It utilizes pointwise group convolution and channel shuffle, and is designed for very limited computing power (\eg 10-150 MFLOPs) devices. 
	
	%  	Because these approaches and our method are two types of methods that do not conflict with each other, combining TBN with these efficient structure-based networks is a good choice. 
	%  	For example, MobileNet spends 95\% of its computation time and has 75\% of the parameters in the pointwise convolutions (\ie $1 \times 1$ convolutional layer).
	%  	If we applied our method on those pointwise convolutions, in our analysis, we can get $11.7 \times$ computation reduction and $3.3 \times$ storage saving after our approach is applied to MobileNet. 
	%  	And because the depthwise convolutional layers use full-precision weights and inputs, the combined model does not perform very poorly.
	
	%  	Depthwise separable convolution have been proposed in MobileNet {\cite{MobileNet}}. A depthwise separable convolution splits a standard convolutional layer into a depthwise convolutional layer for filtering and  a pointwise convolution ($1 \times 1$ convolutional layer) for combining. That is, in depthwise convolution, a single filter is applied to each input channel. And the pointwise convolution combine the outputs of depthwise convolution by using a $1 \times 1$ convolutional layer. Because of this efficient factorization, the computation and model size is drastically reduced. Specifically, we get a reduction in computation of:
	%  	\begin{equation} \label{speed_dsc}
	%  	\frac{h_{out} w_{out} c_{in} h w + h_{out} w_{out} c_{out} c_{in}}{h_{out} w_{out} c_{out} c_{in} h w} = \frac{1}{c_{out}} + \frac{1}{h w}
	%  	\end{equation}
	
	%  	MobileNet spends 95\% of its computation time and has 75\% of the parameters in $1 \times 1$ convolutions. If we apply our method to $1 \times 1$ convolutional layers, MobileNet can further reduce the number of parameters and computation cost. Thus, after $1 \times 1$ convolution is replaced by $1 \times 1$ ternary-binary convolutional layer, we can get the reduction in computation (for simplicity, assuming $c_{in}$ is divisible by $L$):
	%  	\begin{equation}
	%  	\begin{array}{ll}
	%  	&\displaystyle \frac{h_{out} w_{out} c_{in} h w + 2 h_{out} w_{out} c_{out} + \frac{3}{\gamma} h_{out} w_{out} c_{out}  \frac{c_{in}}{L} }{h_{out} w_{out} c_{out} c_{in h w}} \\
	%  	& \\
	%  	= &\displaystyle \frac{1}{c_{out}} + \frac{2}{c_{in} h w} + \frac{3}{\gamma L h w}
	%  	\end{array}
	%  	\end{equation}
	%  	and the reduction in storage :
	%  	\begin{equation}
	%  	\frac{c_{in} h  w + \frac{1}{32}c_{out} c_{in}}{c_{out} c_{in} h w} = \frac{1}{c_{out}} + \frac{1}{32 h w}
	%  	\end{equation}
	%  	When $c_{in} = c_{out} = 256$, $h = w = 3$, $L = 64$, $\gamma = 1.91$, the speedup is $133.3\times$ and storage reduce $135.5 \times$. Table \ref{table:cmp_MobileNet} compares the computation and number of parameters between origin architecture in MobileNet and the ternary-binary convolution version of MobileNet (TBN-MobileNet). It shows that we can get $11.7 \times$ computation reduction and $3.3 \times$ storage saving after our approach is applied to MobileNet.
	
	%  	\begin{table}
	%  		\caption{This table compares the computation and number of parameters between MobileNet and TBN-MobileNet.}
	%  		\label{table:cmp_MobileNet}
	%  		\begin{center}
	%  			\begin{tabular}{ccc}
	%  				\toprule
	%  				Model & Million MACs & Million Parameters \\
	%  				\midrule
	%  				MobileNet & 569 & 4.2 \\
	%  				TBN-MobileNet & 48.5 & 1.3 \\
	%  				\bottomrule
	%  			\end{tabular}
	%  		\end{center}
	%  	\end{table}
	
	%
	%\textbf{Structure-based methods} 
	%The redundancy of CNNs is caused by the structure of the network itself. 
	%So changing the structure of CNNs is widely adopted in many efficient network architectures.
	%ResNet {\cite{ResNet}} modified the building block as a more economical bottleneck designs.  
	%With $1 \times 1$ convolution layer, SqueezeNet {\cite{SqueezeNet}} achieved AlexNet-level accuracy on ImageNet with 50x fewer parameters. 
	%For mobile and embedded vision applications, Andrew G. Howard \etal {\cite{MobileNet}} proposed a class of efficient models called MobileNets.
	%MobileNets build lightweight deep neural networks by using depthwise separable convolutions. ShuffleNet {\cite{ShuffleNet}} is an extremely computation efficient CNN architecture. 
	%It utilizes pointwise group convolution and channel shuffle, and is designed for very limited computing power (\eg 10-150 MFLOPs) devices. 
	%Local binary convolutional neural networks (LBCNN) {\cite{LocalBCNN}} used a set of fixed sparse predefined binary convolutional filters to achieve significant learnable parameter reduction. 
	%Lookup-based convolutional neural network (LCNN){\cite{LCNN}} uses few lookups to encode convolutions to a dictionary that is trained to cover the space of weights in CNNs, and offers dramatic speed ups at inference. 
	%At the same time, LCNN enables efficient training, \ie few-shot learning and few-iteration learning.
	%
	%Both the structure-based methods and the fixed-point quantization based methods require to train the network from scratch. These two lines of methods are orthogonal, we can use both kinds of methods in a network at the same time. 
	%% We propose a method based on fixed-point quantization and combine it with one of the structure-based methods.
	%
	%\textbf{Others} 
	
	%\section{Combine with Other Efficient Networks}
	%
	%\subsection{MobileNets} 
	%
	
	%
	%\subsection{Local Binary Convolutional Neural Networks}
	%
	%Juefei-Xu \etal \cite{LocalBCNN} proposed Local binary convolution (LBC). The LBC layer is based on a set of fixed, predefined, non-learnable binary convolutional filters, a non-linear activation function and a set of learnable linear weights. The LBC layer affords $9 \times$ to $169 \times$ learnable parameters reduction. But the inputs of a binary convolutional layer are real-valued, just the same as in BWN {\cite{XnorNet}}. We can use ternary-binary convolution to replace the predefined binary convolutional layer so that the LBC layer can afford a large speedup while keeping significant learnable parameter reductions.
	%
	%A local binary convolution consists of $m \cdot c_{in} \cdot h \cdot w $ fixed weights and $m \cdot c_{out}$ learnable parameters (corresponding to the $1 \times 1$ convolution), where $m$ is the number of fixed convolutional filters. The number of MACs required by a LBC layer is:
	%\begin{equation}
	%(h_{out} \cdot w_{out}) \cdot m \cdot (c_{in} \cdot h \cdot w) + (h_{out} \cdot w_{out}) \cdot c_{out} \cdot m
	%\end{equation}
	%Compared with a standard convolutional layer, the computation of a LBC layer is increased when $m = c_{out}$. Of course, the LBC layer can be accelerated by using Add/Minus operations to replace Multiplication operations in the predefined fixed binary convolution layer. But during inference, the reduction of computational complexity in LBCNN (Local Binary Convolutional Neural Networks) is limited.
	%
	%We use TB-Convolution as an alternative of the predefined fixed binary convolution layer, and this kind of LBC layer is called TBN-LBC layer. The ratio of computation in CNN and TBN-LBC is:
	%\begin{equation}
	%\frac{c_{out}  c_{in}  h  w}{2 m + \frac{3m}{\gamma} \lceil  \frac{c_{in}  h  w}{L} \rceil + c_{out}  m}
	%\end{equation}
	%When $m = c_{in} = c_{out} = 256, h = w = 3, L = 64, \gamma=1.91$, the ratio is $7.3$. It shows that the computational cost can be reduced $7.3 \times$ and the number of learnable parameters can be reduced $9\times$.
\end{document}
