% last updated in April 2002 by Antje Endemann
% Based on CVPR 07 and LNCS, with modifications by DAF, AZ and elle, 2008 and AA, 2010, and CC, 2011; TT, 2014; AAS, 2016

\documentclass[runningheads]{llncs}
\usepackage{graphicx}
\usepackage{amsmath,amssymb} % define this before the line numbering.
\usepackage{ruler}
\usepackage{color}

\usepackage[pagebackref=true,breaklinks=true,letterpaper=true,colorlinks,bookmarks=false]{hyperref}

\usepackage[width=122mm,left=12mm,paperwidth=146mm,height=193mm,top=12mm,paperheight=217mm]{geometry}

\usepackage{cite}

\newcommand{\sign}{sign}
\newcommand{\ternary}{f_{ternary}}
\newcommand{\imTcol}{ten2mat}
\newcommand{\colTim}{mat2ten}
\newcommand{\argmin}{\mathop{\arg\min}}
\newcommand{\argmax}{\mathop{\arg\max}}
\newcommand{\bitcount}{\mathbf{bitcount}}
\newcommand{\XORop}{\mathbf{XOR}}
\newcommand{\ANDop}{\mathbf{AND}}
\newcommand{\RR}{\mathbb{R}}
\newcommand{\NA}{-}
\usepackage[normalem]{ulem}
\usepackage{subfigure}
\usepackage{booktabs}
\usepackage{multirow}
\usepackage{algorithm}  
\usepackage{algorithmic} 
\usepackage{footnote}

\usepackage{pifont}% http://ctan.org/pkg/pifont
\newcommand{\cmark}{\ding{51}}%
\newcommand{\xmark}{\ding{55}}%

\newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}}  

\usepackage{xspace}

\makeatletter
\DeclareRobustCommand\onedot{\futurelet\@let@token\@onedot}
\def\@onedot{\ifx\@let@token.\else.\null\fi\xspace}

\def\eg{\emph{e.g}\onedot} 
\def\Eg{\emph{E.g}\onedot}
\def\ie{\emph{i.e}\onedot} 
\def\Ie{\emph{I.e}\onedot}
\def\cf{\emph{c.f}\onedot} 
\def\Cf{\emph{C.f}\onedot}
\def\etc{\emph{etc}\onedot} 
\def\vs{\emph{vs}\onedot}
\def\wrt{w.r.t\onedot} 
\def\dof{d.o.f\onedot}
\def\etal{\emph{et al}\onedot}
\makeatother

\renewcommand{\paragraph}{\textbf}
\addtolength{\abovecaptionskip}{-.3cm}
\addtolength{\belowcaptionskip}{-.3cm}
%
\addtolength{\parskip}{-0.02cm}
%
\addtolength{\textfloatsep}{-0.4cm}
%\addtolength{\floatsep}{-0.4cm}
\renewcommand\floatpagefraction{.9}
\renewcommand\topfraction{.9}
\renewcommand\bottomfraction{.9}
\renewcommand\textfraction{.1}
\setcounter{totalnumber}{50}
\setcounter{topnumber}{50}
\setcounter{bottomnumber}{50}
% Commands like \normalsize, \small and \footnotesize could change the
% length of \abovedisplayskip, \belowdisplayskip etc.
%
\expandafter\def\expandafter\normalsize\expandafter{%
	\normalsize\setlength\abovedisplayskip{3pt}}
\expandafter\def\expandafter\normalsize\expandafter{%
	\normalsize\setlength\belowdisplayskip{3pt}}

\begin{document}
	% \renewcommand\thelinenumber{\color[rgb]{0.2,0.5,0.8}\normalfont\sffamily\scriptsize\arabic{linenumber}\color[rgb]{0,0,0}}
	% \renewcommand\makeLineNumber {\hss\thelinenumber\ \hspace{6mm} \rlap{\hskip\textwidth\ \hspace{6.5mm}\thelinenumber}}
	% \linenumbers
	\pagestyle{headings}
	\mainmatter
	\def\ECCV18SubNumber{1920}  % Insert your submission number here
	
	\title{Supplementary Documents for TBN: Convolutional Neural Network with Ternary Inputs and Binary Weights} 
	
	\titlerunning{ECCV-18 submission ID \ECCV18SubNumber}
	
	\authorrunning{ECCV-18 submission ID \ECCV18SubNumber}
	
	\author{Anonymous ECCV submission}
	\institute{Paper ID \ECCV18SubNumber}
	
	
	\maketitle
	
	
	Although the main paper stands on its own, it is still worthwhile showing more model details and experimental results. In this supplementary document, we provide:
	\begin{itemize}
		\item More details for TBN. 
		\item More experiments on CIFAR-10.
		\item How to further accelerate TBN? 
	\end{itemize}
	
	\section{More details for TBN}
	
	In the main paper, binary operations are used to accelerate the dot product with a ternary vector $\vec{t}$ and a binary vector $\alpha \vec{b}$ as its operands, where $\vec{t} \in \{-1, 0, 1\}^q, \alpha \in \mathbb{R}^+, \vec{b} \in \{-1, +1\}^q$, and $q$ is the length of the vectors. However, we did not explain how to obtain the acceleration equation (\ie, Equation~\eqref{eq:tbn}). In this section, we illustrate the derivation of Equation~\eqref{eq:tbn}. We also show how to use binary operations to accelerate the dot product whose operands are both binary or both ternary.
	
	We decompose the ternary vector $\vec{t}$ into two vectors $\vec{t}'$ and $\vec{t}''$, \ie
	\begin{equation}
	t'_i = \left\{\begin{array}{rl}
	1, & t_i = 1 \\
	-1, & \mbox{otherwise}
	\end{array} \right.
	,~~
	t''_i = \left\{\begin{array}{rl}
	0, & t_i = 0 \\
	1, & \mbox{otherwise}
	\end{array} \right.
	,~~i=1,\ldots,q
	\end{equation}
	
	We use $num_{x \times y}$ to denote the number of multiplications $x \times y$ ($x \in \{-1, 0, 1\}, y \in \{-1, 1\}$) in the dot product $\vec{t} \cdot \vec{b}$. So,
	\begin{equation}
	\vec{t} \cdot \vec{b} = num_{1 \times 1} + num_{-1 \times -1} - num_{1 \times -1} - num_{-1 \times 1}
	\end{equation}
	
	Because $c_t = \bitcount(\vec{t}'')=\|\vec{t}\|_1$ is the number of non-zero elements in the ternary vector $\vec{t}$, \ie $c_t = num_{1 \times 1} + num_{-1 \times -1} + num_{1 \times -1} + num_{-1 \times 1}$, we obtain
	\begin{equation}
	\vec{t} \cdot \vec{b} = c_t - 2 (num_{1 \times -1} + num_{-1 \times 1})
	\end{equation}
	
	Moreover, $num_{1 \times -1} + num_{0 \times 1} + num_{-1 \times 1} = \bitcount(\vec{b}~\XORop~\vec{t}')$ (note that when $t_i = 0$ and $b_i = -1$ we have $t'_i = -1 = b_i$, so this pair is not counted), so
	\begin{equation}
	num_{1 \times -1} + num_{-1 \times 1} = \bitcount((\vec{b}~\XORop~\vec{t}')~\ANDop~\vec{t}'')
	\end{equation}
	
	Thus, 
	\begin{equation} \label{eq:tbn}
	\vec{t} \cdot (\alpha \vec{b}) = \alpha (\vec{t} \cdot \vec{b}) = \alpha (c_t - 2 \bitcount((\vec{b}~\XORop~\vec{t}')~\ANDop~\vec{t}''))
	\end{equation}
	
	We illustrate the whole calculation process in Table \ref{table:tbn_example} when $q=1, \alpha=1$. 
	\begin{table}
		\centering
		\caption{This table shows how to compute the dot product whose operands' length is 1. `T' denotes logic true, and `F' is logic false.}
		\label{table:tbn_example}
		\begin{tabular}{|c|c|cc|c|c|c|c|c|}
			\hline
			\multicolumn{9}{|c|}{$\vec{t} \cdot (\alpha \vec{b}) = \alpha(c_t - 2 \bitcount((\vec{b}~\XORop~\vec{t}')~\ANDop~\vec{t}'')), \alpha=1, q=1$} \\
			\hline
			$\vec{b}$ & $\vec{t}$ & $\vec{t}'$ & $\vec{t}''$ & $c_t$ & 
			\tabincell{c}{$x_1 =$ \\ $\vec{b}~\XORop~\vec{t}'$} & 
			\tabincell{c}{$x_2 =$ \\ $x_1~\ANDop~\vec{t}''$} & 
			$c_t - 2\bitcount(x_2)$ & $\vec{b} \cdot \vec{t}$\\
			\hline
			\multirow{3}{*}{1}  
			& 1  & 1  & 1 & 1 & T~$\XORop$~T=F & F~$\ANDop$~T=F & 1 - 0 = 1  & $= 1 \times 1  $ \\
			& 0  & -1 & 0 & 0 & T~$\XORop$~F=T & T~$\ANDop$~F=F & 0 - 0 = 0  & $= 1 \times 0  $ \\
			& -1 & -1 & 1 & 1 & T~$\XORop$~F=T & T~$\ANDop$~T=T & 1 - 2 = -1 & $= 1 \times -1 $ \\
			\hline
			\multirow{3}{*}{-1} 
			& 1  & 1  & 1 & 1 & F~$\XORop$~T=T & T~$\ANDop$~T=T & 1 - 2 = -1 & $= -1 \times 1 $ \\
			& 0  & -1 & 0 & 0 & F~$\XORop$~F=F & F~$\ANDop$~F=F & 0 - 0 = 0  & $= -1 \times 0 $ \\
			& -1 & -1 & 1 & 1 & F~$\XORop$~F=F & F~$\ANDop$~T=F & 1 - 0 = 1  & $= -1 \times -1$ \\
			\hline
		\end{tabular}
	\end{table}
	
	Besides, the dot product between two binary vectors can be calculated by:
	\begin{equation} \label{eq:xnor}
	\vec{b}_1 \cdot \vec{b}_2 = q - 2 \bitcount(\vec{b}_1~\XORop~\vec{b}_2)
	\end{equation}
	where $\vec{b}_1, \vec{b}_2 \in \{-1, +1\}^q$. Equation \eqref{eq:xnor} is simplified from Equation \eqref{eq:tbn} by noting that in this case $c_t = q$, $t''_i = 1$ for all $i$, and $\alpha = 1$.
	
	However, the dot product between two ternary vectors is more complex. Let us assume $\vec{t}_1, \vec{t}_2 \in \{-1, 0, +1\}^q$. Then the number of multiplications $1 \times 1$ is:
	\begin{equation}
	num_{1 \times 1} = |\{i \mid \vec{t}_{1i} = 1~\mbox{and}~\vec{t}_{2i} = 1 \}| = \bitcount(\vec{t}'_1~\ANDop~\vec{t}'_2)
	\end{equation}
	where 
	$$
	t'_{1i} = \left\{\begin{array}{rl}
	1, & t_{1i} = 1 \\
	0, & \mbox{otherwise}
	\end{array} \right.
	,~~
	t'_{2i} = \left\{\begin{array}{rl}
	1, & t_{2i} = 1 \\
	0, & \mbox{otherwise}
	\end{array} \right.
	, i = 1,\ldots,q
	$$
	Similarly,
	\begin{equation}
	\begin{array}{lll}
	num_{1 \times -1} &= |\{i \mid \vec{t}_{1i} = 1~\mbox{and}~\vec{t}_{2i} = -1 \}| &= \bitcount(\vec{t}'_1~\ANDop~\vec{t}''_2) \\
	
	num_{-1 \times 1} &= |\{i \mid \vec{t}_{1i} = -1~\mbox{and}~\vec{t}_{2i} = 1 \}| &= \bitcount(\vec{t}''_1~\ANDop~\vec{t}'_2) \\
	
	num_{-1 \times -1} &= |\{i \mid \vec{t}_{1i} = -1~\mbox{and}~\vec{t}_{2i} = -1 \}| &= \bitcount(\vec{t}''_1~\ANDop~\vec{t}''_2)
	\end{array}
	\end{equation}
	where
	$$
	t''_{1i} = \left\{\begin{array}{rl}
	1, & t_{1i} = -1 \\
	0, & \mbox{otherwise}
	\end{array} \right.
	,~~
	t''_{2i} = \left\{\begin{array}{rl}
	1, & t_{2i} = -1 \\
	0, & \mbox{otherwise}
	\end{array} \right.
	, i = 1,\ldots,q
	$$ 
	So, 
	\begin{equation}
	\begin{array}{ll}
	\vec{t}_1 \cdot \vec{t}_2  & = num_{1 \times 1} + num_{-1 \times -1} - num_{1 \times -1} - num_{-1 \times 1} \\
	& = \bitcount(\vec{t}'_1~\ANDop~\vec{t}'_2) + \bitcount(\vec{t}''_1~\ANDop~\vec{t}''_2) \\
	& ~~ - \bitcount(\vec{t}'_1~\ANDop~\vec{t}''_2) - \bitcount(\vec{t}''_1~\ANDop~\vec{t}'_2)
	\end{array}
	\end{equation}
	This shows that $8q$ binary operations are required for the dot product between two ternary vectors. Hence the speedup ratio of a convolutional layer with ternary inputs and ternary weights (\eg, GXNOR~\cite{GXNOR}, TNN~\cite{TNN}) is only $15\times$.
	
	\section{More Experiments on CIFAR-10}
	We evaluate our proposed method on more architectures (on the CIFAR-10 dataset)%
	\footnote{Our implementation follows \url{https://github.com/kuangliu/pytorch-cifar/tree/master/models}.}, and compare the performance with full-precision networks and XNOR-Networks. We run the Adam training algorithm for 200 epochs with a mini-batch size of 200. The learning rate starts at 0.001 and is scaled by 0.1 every 50 epochs. We report the accuracy associated with the best validation epoch.
	
	Table~\ref{table:cifar10} compares the classification accuracy of Ternary-Binary Networks with the baselines on various architectures. On all architectures, our method outperforms XNOR-Networks.% although it is worse than full-precision networks. 
	
	\begin{table}
		\begin{center}
			\caption{This table compares the results of several models on CIFAR-10 dataset among full-precision networks, Ternary-Binary Networks and XNOR-Networks.}
			\label{table:cifar10}
			\begin{tabular}{ccccc}
				\toprule
				architectures & VGG-19 \cite{VGG}  & ResNet-18  \cite{ResNet} & ResNeXt-29(2x64d) \cite{xie2016aggregated}  & DenseNet-121 \cite{huang2016densely}  \\
				\midrule
				full-precision & 92.23 & 93.37 & 92.26 & 93.06 \\
				XNOR-Networks & 84.78 & 90.25 & 85.93 & 90.01 \\
				\midrule
				\textbf{TBN} & 89.52 & 91.05 & 89.06 & 91.65 \\
				\bottomrule
			\end{tabular}
			%		\begin{tabular}{cccc}
			%		\toprule
			%		architectures                                & full-precision  & XNOR  & \textbf{TBN}    \\
			%		\midrule
			%		VGG-19 \cite{VGG}                            & 92.23 & 84.78 &  \textbf{89.52}  \\ 
			%		ResNet-18  \cite{ResNet}                     & 93.37 & 90.25 &  \textbf{91.05}  \\ 
			%		ResNeXt-29(2x64d) \cite{xie2016aggregated}   & 92.26 & 85.93 &  \textbf{89.06}  \\ 
			%		DenseNet-121 \cite{huang2016densely}         & 93.06 & 90.01 &  \textbf{91.65}  \\ 
			%		% PreActResNet-18\cite{he2016identity}         & 92.83 & 82.66 & & \textbf{89.81}  \\ 
			%		\bottomrule
			%	\end{tabular}
		\end{center}
	\end{table}
	
	\section{Accelerate TBN Further}
	In this section we discuss how to obtain faster CNNs based on Ternary-Binary Networks, \ie, by designing an efficient hardware implementation or by combining TBN with other efficient structure-based networks.
	
	\subsection{Hardware Design}
	TBN can achieve high speedup and compression ratios on general computation platforms such as CPU, GPU, and ARM.
	But we can also design our own hardware architecture on an FPGA or ASIC board to obtain a higher speedup ratio.
	There are two strategies.
	The first is to design hardware that performs wider (\eg, 1024- or 2048-bit) binary operations in a single clock cycle.
	The second is to design hardware that can execute the three different binary operations $\ANDop$, $\XORop$ and $\bitcount$ simultaneously in a single clock cycle.
	On such dedicated hardware, the dot product might even be executed in one clock cycle. If so, the complexity of Ternary-Binary matrix multiplication can be thought of as $O(n^2)$.
	In addition, XNOR-Networks and HORQ can also be accelerated by designing hardware with similar strategies.
	
	\subsection{Combine with Other Efficient Structure-based Networks}
	
	Our proposed approach is based on fixed-point quantization. 
	But there is another line of methods that accelerate and compress networks by using more efficient network architectures.
	For example, by using $1 \times 1$ convolution layers, SqueezeNet~\cite{SqueezeNet} achieved AlexNet-level accuracy on ImageNet with $50\times$ fewer parameters. 
	For mobile and embedded vision applications, Howard \etal {\cite{MobileNet}} proposed a class of efficient models called MobileNets.
	MobileNets build lightweight deep neural networks by using depthwise separable convolutions. 
	ShuffleNet~\cite{ShuffleNet} is an extremely computation-efficient CNN architecture. It utilizes pointwise group convolution and channel shuffle, and is designed for devices with very limited computing power (\eg, 10--150 MFLOPs). 
	
	Because these approaches and our method are orthogonal and do not conflict with each other, combining TBN with such efficient structure-based networks is a promising choice. 
	%For example, MobileNet spends 95\% of its computation time and has 75\% of the parameters in the pointwise convolutions (\ie $1 \times 1$ convolutional layer).
	%If we applied our method on those pointwise convolutions, in our analysis, we can get $11.7 \times$ computation reduction and $3.3 \times$ storage saving after our approach is applied to MobileNet. 
	%And because the depthwise convolutional layers use full-precision weights and inputs, the combined model does not perform very poorly.
	MobileNet~\cite{MobileNet} proposed the depthwise separable convolution. A depthwise separable convolution splits a standard convolutional layer into a depthwise convolutional layer for filtering and a pointwise convolution ($1 \times 1$ convolutional layer) for combining. That is, in the depthwise convolution, a single filter is applied to each input channel. The pointwise convolution then combines the outputs of the depthwise convolution by using a $1 \times 1$ convolutional layer. Because of this efficient factorization, the computation and model size are drastically reduced. Specifically, we get a reduction in computation of:
	\begin{equation} \label{speed_dsc}
	\frac{h_{out} w_{out} c h w + h_{out} w_{out} c_{out} c}{h_{out} w_{out} c_{out} c h w} = \frac{1}{c_{out}} + \frac{1}{h w}
	\end{equation}
	where the shape of the filter is $c \times h \times w$, and the shape of the output of the convolutional layer is $c_{out} \times h_{out} \times w_{out}$.
	
	MobileNet spends 95\% of its computation time and has 75\% of its parameters in $1 \times 1$ convolutions. If we apply our method to the $1 \times 1$ convolutional layers, MobileNet can further reduce its number of parameters and computation cost. Thus, after each $1 \times 1$ convolution is replaced by a $1 \times 1$ ternary-binary convolutional layer, we get the following reduction in computation (for simplicity, assuming $c$ is divisible by $L$):
	\begin{equation}
	\begin{array}{ll}
	&\displaystyle \frac{h_{out} w_{out} c h w + h_{out} w_{out} c_{out} + \frac{3}{\gamma} h_{out} w_{out} c_{out}  \frac{c}{L} }{h_{out} w_{out} c_{out} c h w} \\
	& \\
	= &\displaystyle \frac{1}{c_{out}} + \frac{1}{c h w} + \frac{3}{\gamma L h w}
	\end{array}
	\end{equation}
	and the reduction in storage:
	\begin{equation}
	\frac{c h  w + \frac{1}{32}c_{out} c}{c_{out} c h w} = \frac{1}{c_{out}} + \frac{1}{32 h w}
	\end{equation}
	When $c = c_{out} = 256$, $h = w = 3$, $L = 64$, and $\gamma = 1.91$, the speedup is $141\times$ and the storage is reduced by $136\times$. Table~\ref{table:cmp_MobileNet} compares the computation and the number of parameters between the original architecture of MobileNet and the ternary-binary convolution version of MobileNet (TBN-MobileNet). It shows that we can get an $11.7\times$ computation reduction and a $3.3\times$ storage saving after our approach is applied to MobileNet.
	
	\begin{table}
		\caption{This table compares the computation and number of parameters between MobileNet and TBN-MobileNet. When calculating the computation, we convert the number of binary operations to the number of equivalent MACs for comparison.}
		\label{table:cmp_MobileNet}
		\begin{center}
			\begin{tabular}{c|cc|cc}
				\toprule
				Model & Million MACs & speedup ratio &Million Parameters  & storage saving\\
				\midrule
				MobileNet & 569 & $1\times$ & 4.2 & $1\times$\\
				TBN-MobileNet & 48.5 & $11.7\times$  & 1.3 & $3.3\times$\\
				\bottomrule
			\end{tabular}
		\end{center}
	\end{table}
	
	
	\bibliographystyle{splncs}
	\bibliography{egbib}
\end{document}
