%\documentclass{acmsiggraph}                     % final
 %\documentclass[annual]{acmsiggraph}
%\documentclass[annualconference]{acmsiggraph}  % final (annual conference)
\documentclass[preprint]{acmsiggraph}            % review
%\documentclass[widereview]{acmsiggraph}        % wide-spaced review
%\documentclass[preprint]{acmsiggraph}          % preprint

%% Uncomment one of the five lines above depending on where your paper is
%% in the conference process. ``review'' and ``widereview'' are for review
%% submission, ``preprint'' is for pre-publication, and ``final'' is for
%% the version to be printed. The ``final'' variant will accept the 
%% ``annualconference'' parameter, which changes the height of the space
%% left clear for the ACM copyright information.

%% The 'helvet' and 'times' packages define the typefaces used for
%% serif and sans serif type in this document. Computer Modern Roman 
%% is used for mathematics typesetting. The scale factor is set to .92
%% to bring the sans-serif type in line with the serif type.

\usepackage[scaled=.92]{helvet}
\usepackage{times}

\usepackage{multirow}
\usepackage{algorithm,algorithmic}
%\usepackage{multirow} % duplicate: already loaded above

%http://tex.stackexchange.com/questions/3445/latex-tables-how-do-i-make-bold-horizontal-lines-typically-hline
% \hlinewd{<dimen>}: draw a horizontal table rule of the given thickness,
% e.g. \hlinewd{1.5pt} for a bold line (adapted from \hline in latex.ltx).
% NOTE(review): low-level TeX -- the \ifnum0=`}\fi trick balances brace
% counting inside \noalign; do not reformat these lines.
\makeatletter
\def\hlinewd#1{%
\noalign{\ifnum0=`}\fi\hrule \@height #1 %
\futurelet\reserved@a\@xhline} 
\makeatother

%% The 'graphicx' package allows for the inclusion of EPS figures.

\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{wrapfig}
\usepackage{overpic}

%% use this for zero \parindent and non-zero \parskip, intelligently.

\usepackage{parskip}

%% Optional: the 'caption' package provides a nicer-looking replacement
%% for the standard caption environment. With 'labelfont=bf,textfont=it',
%% caption labels are bold and caption text is italic.

\usepackage[labelfont=bf,textfont=it]{caption}
%\usepackage{subfig}

\usepackage{subcaption}

%\newcommand{\mydraft}{true}
\newcommand{\mydraft}{false}
%\interdisplaylinepenalty=2500

% \magicfigure: one large figure on the left, 2x2 grid of small figures on
% the right, horizontally centered. All images honor draft=\mydraft.
\newcommand{\magicfigure}[8]{%
% #1 is the width of the small figures
% #2 is the height of the big figure
% #3 is the horizontal pad
% #4 is the big figure; #5-#8 are the four small figures (was mislabeled #4-#9)
{\hfill\hbox{\includegraphics[height=#2,draft=\mydraft]{#4}\hspace{#3}\vbox to #2{\hbox{\includegraphics[width=#1,draft=\mydraft]{#5}\hspace{#3}\includegraphics[width=#1,draft=\mydraft]{#6}}\vfill\hbox{\includegraphics[width=#1,draft=\mydraft]{#7}\hspace{#3}\includegraphics[width=#1,draft=\mydraft]{#8}}}}\hfill}%
}

% \supermagicfigure: one large figure on the left, 2x3 grid of small figures
% on the right, horizontally centered. All images honor draft=\mydraft.
\newcommand{\supermagicfigure}[9]{%
% #1 is the width of the small figures
% #2 is the height of the big figure
% #3 is the big figure (NOT a pad -- the original comment was wrong);
% #4-#9 are the six small figures (two rows of three)
% NOTE: the horizontal pad is hard-coded to .01in in this macro
{\hfill\hbox{\includegraphics[height=#2,draft=\mydraft]{#3}\hspace{.01in}\vbox to #2{\hbox{\includegraphics[width=#1,draft=\mydraft]{#4}\hspace{.01in}\includegraphics[width=#1,draft=\mydraft]{#5}\hspace{.01in}\includegraphics[width=#1,draft=\mydraft]{#6}}\vfill\hbox{\includegraphics[width=#1,draft=\mydraft]{#7}\hspace{.01in}\includegraphics[width=#1,draft=\mydraft]{#8}\hspace{.01in}\includegraphics[width=#1,draft=\mydraft]{#9}}}}\hfill}%
}

% \fourfigure: four equal-width figures in a single row.
\newcommand{\fourfigure}[6]{%
% #1 is the width of one small figure
% #2 is the horizontal pad between figures
% #3-#6 are the figures
{\hbox{\includegraphics[width=#1,draft=\mydraft]{#3}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#4}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#5}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#6}}}%
}

% \fourfigureline: like \fourfigure, but with an extra .1in gap between the
% second and third figures, visually splitting the row into two pairs.
\newcommand{\fourfigureline}[6]{%
% #1 is the width of one small figure
% #2 is the horizontal pad between figures
% #3-#6 are the figures
{\hbox{\includegraphics[width=#1,draft=\mydraft]{#3}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#4}\hspace{#2}\hspace{.1in}\includegraphics[width=#1,draft=\mydraft]{#5}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#6}}}%
}


% \fivefigure: five equal-width figures in a single row.
\newcommand{\fivefigure}[7]{%
% #1 is the width of one small figure
% #2 is the horizontal pad between figures
% #3-#7 are the figures (was mislabeled #3-#6)
{\hbox{\includegraphics[width=#1,draft=\mydraft]{#3}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#4}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#5}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#6}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#7}}}%
}

% \sixfigure: six equal-width figures in a single row.
\newcommand{\sixfigure}[8]{%
% #1 is the width of one small figure
% #2 is the horizontal pad between figures
% #3-#8 are the figures (was mislabeled #3-#6)
{\hbox{\includegraphics[width=#1,draft=\mydraft]{#3}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#4}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#5}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#6}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#7}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#8}}}%
}


% \threefigure: three equal-width figures in a single row, preceded by a
% hard-coded .1in indent.
\newcommand{\threefigure}[5]{%
% #1 is the width of one small figure
% #2 is the horizontal pad between figures
% #3-#5 are the figures
{\hbox{\hspace{.1in}\includegraphics[width=#1,draft=\mydraft]{#3}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#4}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#5}}}%
}

% \twofigure: two equal-width figures side by side.
\newcommand{\twofigure}[4]{%
% #1 is the width of one small figure
% #2 is the horizontal pad between figures
% #3-#4 are the figures
{\hbox{\includegraphics[width=#1,draft=\mydraft]{#3}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#4}}}%
}

% \myname: shift an author name .15in to the left in the title block
% (used in the commented-out multi-author layout below to align names).
\newcommand{\myname}[1]{\hspace{-.15in}#1}

%% If you are submitting a paper to the annual conference, please replace 
%% the value ``0'' below with the numeric value of your OnlineID. 
%% If you are not submitting this paper to the annual conference, 
%% you may safely leave it at ``0'' -- it will not be included in the output.

%\TOGonlineid{46}
\TOGvolume{0}
\TOGnumber{0}
\TOGarticleDOI{1111111.2222222}
\TOGprojectURL{}
\TOGvideoURL{}
\TOGdataURL{}
\TOGcodeURL{}

%% Paper title.

%no abreviations
\title{A Robust and Fast Leaf Identification System}

%% Author and Affiliation (single author).

%%\author{Roy G. Biv\thanks{e-mail: roy.g.biv@aol.com}\\Allied Widgets Research}

%% Author and Affiliation (multiple authors).

%\author{Roy G. Biv\thanks{e-mail: roy.g.biv@aol.com}\\ Starbucks Research %
%\and Ed Grimley\thanks{e-mail:ed.grimley@aol.com}\\Nigel Mansell\thanks{nigelf1@msn.com}\\ Grimley Widgets, Inc. %
%\and Martha Stewart\thanks{e-mail:martha.stewart@marthastewart.com}\\ Martha Stewart Enterprises \\ Microsoft Research}

\author{
    Ning Jin\thanks{e-mail: njin19@stanford.edu}\\ Stanford University \and 
    Wenlong Lu\thanks{e-mail:wenlongl@stanford.com}\\ Stanford University
}

%\author{Matthew Cong\thanks{e-mail: \{mdcong,mikebao,rfedkiw\}@stanford.edu}\\Stanford University\\Industrial Light + Magic%
%\and \myname{Michael Bao$^*$}\\Stanford University
%\and \myname{Ronald Fedkiw$^*$}\\ \myname{Stanford University}\\ \myname{Industrial Light + Magic}}
%
\pdfauthor{Ning Jin, Wenlong Lu}

%% Keywords that describe your work.

\keywords{leaf, segmentation, classification}

%%%%%% START OF THE PAPER %%%%%%

\begin{document}


%% The ``\maketitle'' command must be the first command after the
%% ``\begin{document}'' command. It prepares and prints the title block.


\teaser{
%        \centering
        \includegraphics[width=\textwidth]{figure/final/collection.jpg}
%        \caption{Segmentation results of leaves from 184 different species.}
%        \label{fig:collection}
}

\maketitle


%% ACM Computing Review (CR) categories. 
%% See <http://www.acm.org/class/1998/> for details.
%% The ``\CRcat'' command takes four arguments.

%% The ``\keywordlist'' command prints out the keywords.
%%% The ``abstract'' environment should contain the abstract for your
%%% content -- one to several paragraphs which describe the work.

\begin{abstract}
Built on Leafsnap's framework \cite{Kumar2012}, we present a more efficient and more robust leaf identification system with very high accuracy.
We used a novel multi-attempt K-means clustering method to perform the leaf segmentation, which is much faster than the Expectation-Maximization 
method employed by \cite{Kumar2012}, and can easily preserve tiny leaves with the presence of non-leaf objects. 
We then apply our post-processing pipeline to extract the leaf component from the initial segmentation. 
At this stage we robustly remove false-positive regions caused by stems, non-leaf objects, and illumination variance. 
Our system accepts 99\% of images from the dataset and still maintains a very high accuracy of 91.1\% within the top 5 matches.
Although \cite{Kumar2012}'s reported accuracy is higher at 96.8\% within the top 5 matches, 
they only accept approximately 48\% of images from the dataset. 
Therefore, we have successfully identified 94\% more images than they did, with only a very slight loss of accuracy.
In this sense, our system is much more practically usable and user-friendly compared to ``Leafsnap''.
\end{abstract}

%%% The ``\keywordlist'' prints out the user-defined keywords.

\keywordlist

%%% If you are preparing a paper to be presented in the Technical Papers
%%% program at one of our annual flagship events (and, therefore, using 
%%% the ``annual'' parameter to the ``\documentclass'' command), the 
%%% ``\TOGlinkslist'' command prints out the list of hyperlinked icons.
%%% If you are using any other parameter to the ``\documentclass'' command
%%% this command does absolutely nothing.

\TOGlinkslist

%%% The ``\copyrightspace'' command will leave clear an amount of space
%%% at the bottom of the left-hand column on the first page of your paper,
%%% according to the parameter used in the ``\documentclass'' command.

\copyrightspace

\section{Introduction}
\label{sec:introduction}

Our project is inspired by the mobile application ``Leafsnap'', 
which labels plant species given photos of leaves against an untextured light-colored background. 
This is an example of automatic plant recognition, a growing research area in computer vision. 
These scientific methods are developed to assist botanists in their field expeditions,
but they could also be built as tools to help the general audience in learning species information, 
such as this app. Along similar lines, researchers have worked on flower recognition~\cite{Nilsback2008}, bird recognition~\cite{Branson2010}, etc.

While the ``Leafsnap'' app is able to output correct matches with extremely high accuracy, it heavily relies on the requirement of 
placing a flat leaf on an essentially white paper background with very good illumination and little noise. 
We found this too stringent and not very user-friendly for amateurs like us. And their app rejects a lot of images that we believe are qualified.
Our goal is to develop a more robust system that is able to segment a leaf against light color background with moderate amount of noise, 
and attach it with most likely labels from our database. 
    
\section{Related work}
\label{sec:related}

\subsection{Previous Work}

\cite{Kumar2012} describes in detail how ``Leafsnap'' works. First, they have trained the recognition system with data of 184 trees in the Northeastern United States, each with hundreds of images (both lab photos and field photos with lighting and color variation). Then, given a user-loaded photo, their pipeline is composed of four steps: 
\begin{itemize}
\item \textbf{Classify} whether the input photo is a valid leaf; 
\item \textbf{Segment} the leaf part to get a binary mask; 
\item \textbf{Extract} curvature features at different scales; and 
\item \textbf{Compare} the features with dataset and find nearest neighbors. 
\end{itemize}

For segmentation, in \cite{Kumar2012}, they argued that shape should be the only cue in leaf recognition, 
and other factors like color, pattern, flowers are more noisy than useful. 
Therefore, their segmentation only focuses on extracting the boundary of the leaf, 
generating a binary mask over the photo. 
Their segmentation method is expectation-maximization (EM) based on color in HSV space,
which works well in most cases. 

After initial segmentation, \cite{Kumar2012} suggests removing the stem, filling holes, 
and resizing the leaf to a common area before extracting the curvature features. 
The feature they use is Histograms of Curvature over Scale (HoCS), which consist of 25 curvature histograms of 25 different scales.
Then, they find the match by histogram intersection distance.

Apart from the ``Leafsnap'' paper, there have been some other studies in the area of plant species recognition. 
\cite{Belhumeur2008a} did some pioneer work in building a working computer vision system that aids botanists in 
plant species recognition. They use the Inner Distance Shape Context (IDSC) for leaf shape matching.
\cite{Teng2009a} attempted to recover leaf shape from their 3D position by taking multiple images from different viewpoints.
\cite{cerutti2011a} presented a system based on parametric polygon for leaf segmentation and shape estimation. 
\cite{Valliammal2012b} proposed a method that combines non-linear K-means clustering with Sobel edge detection in leaf segmentation.

\subsection{Our Contribution}
Compared to ``Leafsnap'', we have built a system that is more practical and user-friendly. 
Our framework is much more robust to noisy background and has much higher acceptance rate 
while still maintaining very high accuracy. 

In particular, our contribution includes the following: 
\begin{itemize}
\item Multi-attempt K-means segmentation which gives 10x faster runtime performance and higher acceptance rate
\item Improved leaf extraction algorithm robust to noisy borders and non-leaf objects
\item Adaptive stem removal algorithm that avoids misidentifications based on leaf dimensions
\end{itemize}

The above-mentioned improvements lead to 99\% acceptance rate, compared to their roughly 48\%.\footnote{This estimation of their acceptance rate is based on the examination of random samples of their provided segmentation result.} This is achieved with only 5.7\% drop 
in test accuracy to 91.1\% compared with their 96.8\%. 
We believe that this is an acceptable, and in fact preferable trade-off towards a more user-friendly system.
We have successfully realized our original goal to loosen their strict requirement on input images 
by providing a more robust segmentation algorithm in the back end.  

\section{Method}
\label{sec:technical}

\subsection{System Overview}

We generally followed \cite{Kumar2012}'s framework. For segmentation, we replace their EM clustering method by our novel multi-attempt K-means clustering method to achieve a much more robust solution. 
We extract the leaf component with our customized post-processing algorithm, and then remove stem and resize the image based on the leaf area as \cite{Kumar2012} suggested.

An overview of our segmentation pipeline is illustrated in Figure~\ref{fig:pipeline}. 
\begin{figure}[!hbt]
	\centering
	\includegraphics[width=0.95\linewidth]{figure/pipeline.png}
	\caption{Leaf segmentation pipeline.}
	\label{fig:pipeline}
\end{figure}

For identification, we exactly followed \cite{Kumar2012}'s method, using Histograms of Curvature over Scale (HoCS) and histogram intersection distance to find the matches.

To build our system, we employ the OpenCV library, which is appropriate for our purpose since it has many image processing tools as well as baseline implementation of commonly used vision algorithms. 

The dataset we use contains images of 184 tree species, including 23147 lab images and 7719 field images.\footnote{\:\url{http://leafsnap.com/dataset/}}

\subsection{Technical Part}

\subsubsection{Segmentation with EM}
\label{sec:segmentation}
We have followed \cite{Kumar2012}'s suggestion in using saturation and value as features, which is 
easily achieved by converting from RGB to HSV space. 
For the main segmentation method, we first used EM.  
EM is an iterative method for solving maximum likelihood estimates of parameters in statistical models. 
It consists of two steps: the expectation (E) step, which computes the expectation of the log-likelihood using the current parameter estimates, and the maximization (M) step, which computes parameters that maximize the expected log-likelihood computed in the E step. 
With respect to image segmentation, EM is often used on a Gaussian mixture model, where each segment or cluster is described by a Gaussian distribution. The parameters thus consist of 1) the weight \(\pi_k\) of each mixture \(k\), and 2) the mean \(\mu_k\) and covariance \(\Sigma_k\) of each Gaussian mixture \(k\). Given feature vector \(x_i\), the probability that it is drawn from this Gaussian mixture model with \(M\) clusters is then 
\begin{equation}
	p[x_i | \Theta = (\pi, \mu, \Sigma)] = \sum_{k=1}^{M} \pi_k p_k[x_i | \mu_k, \Sigma_k], 
\end{equation}
where \(p_k\) represents the probability density function of the normal distribution.
For \(N\) samples, the log likelihood function is thus \(L(x | \Theta) = \sum_{i=1}^N \log P[x_i | \Theta]\). 
For the expectation step, we compute the probability that each sample \(x_i\) belongs to each cluster \(k\): 
\begin{equation}
	p_{i,k} = \frac{\pi_k p_k}{\sum_{l=1}^M \pi_l p_l}
\end{equation}
For the maximization step, we update the parameters that maximize the log likelihood function (\(k=1,...,M\)):
\begin{align}
	\pi_k &= \frac{1}{N} \sum_{i=1}^N p_{i,k}\\
	\mu_k &= \frac{\sum_{i=1}^N x_ip_{i,k}}{\sum_{i=1}^N p_{i,k}}\\
	\Sigma_k &= \frac{\sum_{i=1}^N (x_i-\mu_k)^T(x_i-\mu_k)p_{i,k} }{\sum_{i=1}^N p_{i,k}}
\end{align}
When the algorithm converges, i.e., when the above estimates change less than some threshold in one iteration, we arrive at the model parameters that maximize the log likelihood. 

To realize this in our system, we call the OpenCV EM library to run this iterative clustering algorithm. 
However, upon actually running our system, we found EM too slow to scale for a large dataset like the one we have. 
Even the baseline implementation with image downsampling takes a long time to run on our laptop. 

After getting the clusters, we determine the leaf segment as the one whose center has higher saturation, 
because a white background has low saturation compared with a green leaf. 

\subsubsection{Segmentation with K-means}
\label{sec:kmeans}
While EM generally works very well, it would be computationally expensive to process a dataset of over 30000 images.
Given our limited computing resource, we explored alternative segmentation methods to 
improve our runtime performance. As mentioned in \cite{Valliammal2012b}, we experimented with K-means, which to our surprise is able 
to improve runtime significantly without hurting segmentation quality much. Later, we further developed a new multi-attempt K-means method 
with quality comparable to EM.

\paragraph{Basic K-Means in leaf segmentation}
K-Means is a clustering method which attempts to minimize the sum of square distances over all samples: 
\begin{equation}
	SSD = \sum_{i} ||x_i - \mu_{\delta_i} ||^2,
\end{equation}
where \(x\)'s refer to feature vectors, \(\mu\)'s refer to cluster centers, and \(\delta\)'s refer to assignments. 
Similar to EM, K-means employs an iterative approach to find the optimal solution. 
The process is composed of two steps after initialization of cluster centers \(\mu\): assignment step and update step. 
In the assignment step, for each sample \(x_i\) we compute the (Euclidean) distance to each cluster center \(\mu_k\) and assign it 
to the closest cluster. Formally, for each sample \(i\), we compute assignment \(\delta_i\) that satisfies 
\begin{equation}
	||x_i - \mu_{\delta_i} ||^2 \le ||x_i - \mu_{k} ||^2, \forall k, 1\le k\le M
\end{equation}
In the update step, we update cluster centers \(\mu\) based on the samples assigned to the cluster:
\begin{equation}
	\mu_k = \mathrm{avg}\{x_i \:| \:\delta_i = k\}
\end{equation}
We compute the SSD error after each iteration, and stop when it is lower than a certain epsilon threshold. 
Though K-means is simple and fast, it does have some drawbacks. The clusters that K-means computes tend to be blob-like in shape, 
whereas EM generally allows clusters to have different shapes. 

Taking into consideration these potential disadvantages, we decide that K-means is the better choice in 
this context due to its significant speedup, as shown in Table~\ref{tab:em_kmeans_time} below. 

\paragraph{Multi-attempt K-means}
In terms of special treatment for thin leaves and pine needles, the original ``Leafsnap'' paper used initial 
pixel weighting given predefined regions in the saturation-value space. Each region (non-leaf and leaf)
is assigned equal weight. This, according to them, leads to successful segmentation of these hard cases 
that previously failed under the standard EM described above. To address this issue, we devise a novel approach to 
run multiple attempts of K-means. The result proves to be quite robust to not only these thin leaves 
but also to noisy background, as we illustrate in section \ref{sec:experiments}. 

Since our main task is to extract the leaf object, it naturally makes sense to do a 
binary segmentation on the image. However, this is based on the assumption that 1) the background is 
uniformly colored without noise, and 2) the leaf is of considerable size such that it can be picked up
as an independent cluster of its own. The ``Leafsnap'' app imposes a strict requirement on the 
background -- essentially white paper -- to meet 1), and introduces pixel weighting to address 2). In our opinion, 
there is a simpler and more efficient method to bypass these limitations: allow for more clusters. 
While the idea is straightforward, one is faced with the computational complexity associated 
with increasing number of segments. Since K-means is linear with respect to the number of clusters, 
increasing clusters would directly increase our runtime. To avoid this issue, we 
only incrementally increase the number of clusters if the initial attempt fails. (A failed segmentation
is defined as one that does not contain a leaf component with a minimum required area, 
as described in more details in the section \ref{sec:cleanup}.) 
Specifically, if K-means succeeds in the binary segmentation pass, then we move on; if on the other hand, the initial 
pass fails due to shadows, a noisy background, or a tiny leaf object, we incrementally attempt a larger 
number of clusters until it succeeds. We stop at 10 clusters as we found that there is not much
improvement beyond this point, and it is not worth the additional runtime cost. Note that as before, 
among multiple clusters, the leaf object is chosen as the one with highest saturation. Since most images 
in the dataset have standard background and medium size, only a minority of the images (around 10\%)
fail in the first attempt of K-means binary segmentation. In this way, we only incur extra cost 
for these few ``hard'' images without slowing down the majority of ``easy'' ones. Since K-means is very fast, 
even with multiple attempts the average runtime performance is still reasonable for our purpose. Using this approach,
we achieve much better quality segmentation for these images, reaching as high as 99\% acceptance rate over the dataset. 
 
To highlight the improved speed our modified multi-attempt K-means algorithm provides, 
Table \ref{tab:em_kmeans_time} shows a quantitative runtime comparison with EM, using 100 
image samples randomly drawn from the dataset. The results are from 
running a single thread on a MacBook Pro laptop with 2.6 GHz Intel core i5. 
Our segmentation method achieves a 10x speedup while producing result of similar or better quality, 
as shown in section~\ref{sec:experiments}.

\begin{table}[!hbt]
	\centering
	\begin{tabular} {| l | l | l | l |}
		\hline
		Method 					& Total Runtime (s) & Avg Runtime (s) 	\\ \hline
		EM 						& 676.395 			& 6.76	 			\\ \hline
		Multi-attempt K-means 	& 65.998		 	& 0.66		 	   	\\ \hline
		Speedup 				& \multicolumn{2}{c|}{10x}				\\ \hline
	\end{tabular}
	\caption{Runtime comparison of EM and our multi-attempt K-means}
	\label{tab:em_kmeans_time}
\end{table} 

\subsubsection{Leaf Component Extraction}
\label{sec:cleanup}
After initial segmentation using K-means, we extract the leaf component in the following steps: 
\begin{enumerate}
\item Perform dilation on the binary mask
\item Compute connected components on the dilated mask
\item Exclude components whose contour contains large percentage of points near the image border
\item Reset mask as intersection with the largest-area component
\end{enumerate}

To find connected components in step 2, we first attempted using opencv provided findContours along 
with pointPolygonTest: first find contours of each component, then decide whether a pixel is inside a 
component using point polygon test. However, we found that this approach is not very stable and 
could slightly alter the boundary of the leaf, in some cases making it one ring smaller. 
Given that our features are very sensitive to the boundary shape of the leaf, we decide to write 
our own connected components algorithm, using the 2 pass method. In the first pass, we traverse 
all the pixels in row-major manner, and compare each nonzero pixel with its neighbors (we use 8 neighbors 
instead of 4 neighbors to allow for more connectivity). The pixel label is chosen to be the minimum 
of its neighbors' labels. If the neighbors have different labels, those labels are marked as equivalent. 
In the second pass, we traverse the image again and re-label each nonzero pixel 
based on the equivalence class we marked in the previous pass. The equivalence classes are established and 
traced using the union-find algorithm. Now that we have found the connected components, we perform a last 
step to re-index them using consecutive numbers from 1, 2, 3, and so on. This makes checking and tracking 
much easier. 

To exclude false positives near the image border in step 3, we extract the boundary of each component,
and check for the percentage of points near the border. This approach is different from what 
``Leafsnap'' does, in that they seem to just strictly check for the 1-layer border (based on their description). 
We found this approach unstable: it fails in many cases where the image border is noisy or contains alien objects. A 
good example is the color bar in the lab images in the dataset. These objects have varying areas and 
are usually near the image border but not necessarily exactly on the border. 
With substantial tuning from trial and error, we are able to set our border range and rejection threshold 
appropriately such that we can successfully exclude these non-leaf components. Figures in section~\ref{sec:robust_segmentation}
demonstrate the improvement of our approach versus their segmentation results. 

\subsubsection{Stem Removal}
\label{sec:stemremoval}
As suggested by the ``Leafsnap'' paper, we perform stem removal as a post-processing step to get rid of 
main stem from the leaf mask. Since stems do not provide unique information on the leaf species, it makes 
sense to remove them before extracting the leaf boundary. The process is as follows:
\begin{enumerate}
\item Perform erosion followed by dilation on the input leaf mask, and subtract from the input leaf mask
\item Compute connected components on this output mask from the previous step
\item Find best stem candidate among these components
\item Remove intersection of the stem component and the input leaf mask
\end{enumerate}

The aim of the first step is to extract thin features from the leaf mask, because these 
are the potential stem candidates. However, if we use a fixed element size for the erosion-dilation
operation, we encounter problems as the leaf shapes exhibit large inter-species variation. 
To solve this issue, we implement adaptive element size to extract thin features relative to the 
leaf size. This is realized by computing an oriented bounding box on the whole leaf first, and then 
assign element size as a fraction of this size. Using adaptive filter size allows us to avoid 
falsely removing pine needles and thin leaves. 

To select the stem from the list of thin features, we fit an oriented bounding box for 
each candidate, and pick the one with the largest ratio of the two sides. However, this 
criterion alone is not stable enough, as small noise near the leaf could be misidentified 
as stem. To alleviate the problem, we impose additional requirement that the area of the stem 
has to pass a minimum threshold and the ratio cannot be too small. If no valid stem candidate is found,
we simply skip this step and leave the leaf mask unchanged, because we do not want to
force remove any non-stem component.

\subsubsection{Resize leaf image}
\cite{Kumar2012} suggested resizing the leaf image to a common area before feature extraction.
They didn't describe the process in detail. Our guideline is that we should not downsize most 
images too much. We used a common area of 40000 $\mathrm{pixel}^2$, which is approximately a 200 by 200 pixel leaf.
This also makes it easier to choose the radius range for computing the curvature features later.

\subsubsection{Identification}
\label{sec:identification}

Having produced the segmentation mask, \cite{Kumar2012} generates Histograms of Curvature over Scale (HoCS) feature to incorporate variations in overall leaf shape as well as fine-scale features like serrations. In order to find the species label, they run a nearest neighbor search on the input photo, using histogram intersection as distance. 

We have followed the paper's description to implement the HoCS features. The steps are straightforward and listed below:
\begin{enumerate}
	\item Extract the leaf boundary from the binary segmentation mask
	\item Generate circular templates over different scales
	\item Slide these templates over the leaf boundary and compute a histogram of curvatures, with curvature defined in two manners:
		\begin{itemize}
			\item arc length of circle inside the boundary
			\item area of circle inside the boundary
		\end{itemize}
\end{enumerate}
\cite{Pottmann2009} offers a clear illustration of the curvatures mentioned above, see Figure~\ref{fig:curvature}.
\begin{figure}[!hbt]
    \centering
	\includegraphics[width=\linewidth]{figure/area_arclength_illustration.png}
    \caption{Curvature definition: area of circle inside domain and arclength of circle inside domain}
    \label{fig:curvature}
\end{figure}

As mentioned above, the side length of the leaf is roughly around 200 after resizing. Thus, we use radii from 2 to 50, which work very well for most cases, including those long leaves.

%Figure~\ref{fig:histogram1} and Figure~\ref{fig:histogram2} show the HoCS output for the sample images referenced 
%in subection \ref{sec:segmentation} above.

%\begin{figure}
%	\centering
%	\begin{tabular}{c}
%	\begin{subfigure}{0.8\linewidth}	
%		\includegraphics[width=\linewidth]{figure/1/curvature_arclength_5.png}
%		\caption{Histogram of small-scale curvature (arclength)}
%	\end{subfigure}
%	\\
%	\begin{subfigure}{0.8\linewidth}	
%		\includegraphics[width=\linewidth]{figure/1/curvature_area_5.png}
%		\caption{Histogram of small-scale curvature (area)}
%	\end{subfigure}
%	\\
%	\begin{subfigure}{0.8\linewidth}	
%		\includegraphics[width=\linewidth]{figure/1/curvature_arclength_20.png}
%		\caption{Histogram of large-scale curvature (arclength)}
%	\end{subfigure}
%	\\
%	\begin{subfigure}{0.8\linewidth}	
%		\includegraphics[width=\linewidth]{figure/1/curvature_area_20.png}
%		\caption{Histogram of large-scale curvature (area)}
%	\end{subfigure}
%	\end{tabular}
%	\caption{HoSC for one leaf sample}
%	\label{fig:histogram1}
%\end{figure}
%
%\begin{figure}
%	\centering
%	\begin{tabular}{c}
%	\begin{subfigure}{0.8\linewidth}	
%		\includegraphics[width=\linewidth]{figure/2/curvature_arclength_5.png}
%		\caption{Histogram of small-scale curvature (arclength)}
%	\end{subfigure}
%	\\
%	\begin{subfigure}{0.8\linewidth}	
%		\includegraphics[width=\linewidth]{figure/2/curvature_area_5.png}
%		\caption{Histogram of small-scale curvature (area)}
%	\end{subfigure}
%	\\
%	\begin{subfigure}{0.8\linewidth}	
%		\includegraphics[width=\linewidth]{figure/2/curvature_arclength_20.png}
%		\caption{Histogram of large-scale curvature (arclength)}
%	\end{subfigure}
%	\\
%	\begin{subfigure}{0.8\linewidth}	
%		\includegraphics[width=\linewidth]{figure/2/curvature_area_20.png}
%		\caption{Histogram of large-scale curvature (area)}
%	\end{subfigure}
%	\end{tabular}
%	\caption{HoSC for another leaf sample}
%	\label{fig:histogram2}
%\end{figure}

\section{Experiments}
\label{sec:experiments}

\subsection{Robust segmentation}
\label{sec:robust_segmentation}
As the result of our novel segmentation and well-customized post-processing tools, we accept 30501 out of 30866 leaf images from the dataset (99\%), twice as many as \cite{Kumar2012} did.

The robustness of our segmentation and post-processing tools can be demonstrated in Figure~\ref{fig:robust_segmentation1}-\ref{fig:robust_segmentation4}, compared with the segmentation results kindly provided by \cite{Kumar2012}. 
Note that, for ``field'' leaf images (Figure~\ref{fig:robust_segmentation1}-\ref{fig:robust_segmentation2}), \cite{Kumar2012}'s segmentation generally works well. Yet our segmentation can still be slightly better than their results. In the case of poor illumination, half of their segmentations failed, while our method works stably (Figure~\ref{fig:robust_segmentation2}).

\begin{figure*}
	\centering
	\begin{tabular}{c|c}
	\begin{subfigure}{0.5\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/our_segmented_field_albizia_julibrissin.jpg}
		\caption{Our segmented results for the field images of albizia julibrissin.}
	\end{subfigure} & 
	\begin{subfigure}{0.5\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/their_segmented_field_albizia_julibrissin.png}
		\caption{Kumar et al. 2012's segmented results for the field images of albizia julibrissin.}
	\end{subfigure} \\
	\end{tabular}
	\caption{Segmentation result - field image - albizia julibrissin: 
        It is a difficult category because of its thin features. Our result is slightly better.
    }
	\label{fig:robust_segmentation1}
\end{figure*}

\begin{figure*}
	\centering
	\begin{tabular}{c | c}
	\begin{subfigure}{0.5\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/our_segmented_field_amelanchier_laevis.jpg}
		\caption{Our segmented results for the field images of amelanchier laevis.}
	\end{subfigure} & 
	\begin{subfigure}{0.5\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/their_segmented_field_amelanchier_laevis.png}
		\caption{Kumar et al. 2012's segmented results for the field images of amelanchier laevis.}
	\end{subfigure} \\
	\end{tabular}
	\caption{Segmentation result - field image - amelanchier laevis: 
        It is a difficult category because of shadow and illumination. Our result is much better.
    }
	\label{fig:robust_segmentation2}
\end{figure*}

For ``lab'' leaf images (Figure~\ref{fig:robust_segmentation3}-\ref{fig:robust_segmentation4}), \cite{Kumar2012}'s segmentation failed in many cases. The main reason is that their segmentation method tends to take the color panel as the leaf, especially when the leaf is tiny and thin (Figure~\ref{fig:robust_segmentation4}).

\begin{figure*}
	\centering
	\begin{tabular}{c | c}
	\begin{subfigure}{0.5\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/our_segmented_lab_crataegus_pruinosa.jpg}
		\caption{Our segmented results for the lab images of crataegus pruinosa.}
	\end{subfigure} & 
	\begin{subfigure}{0.5\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/their_segmented_lab_crataegus_pruinosa.png}
		\caption{Kumar et al. 2012's segmented results for the lab images of crataegus pruinosa.}
	\end{subfigure} \\
	\end{tabular}
	\caption{Segmentation result - lab image - crataegus pruinosa: 
        While their segmentation method still works for some cases, our method is much more successful and extracts all the leaves correctly in this category.
    }
	\label{fig:robust_segmentation3}
\end{figure*}

\begin{figure*}
	\centering
	\begin{tabular}{c | c}
	\begin{subfigure}{0.5\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/our_segmented_lab_pinus_bungeana.jpg}
		\caption{Our segmented results for the lab images of pinus bungeana.}
	\end{subfigure} &
	\begin{subfigure}{0.5\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/their_segmented_lab_pinus_bungeana.png}
		\caption{Kumar et al. 2012's segmented results for the lab images of pinus bungeana.}
	\end{subfigure} \\
	\end{tabular}
	\caption{Segmentation result - lab image - pinus bungeana: 
        While their segmentation method is totally misled by the non-leaf object, our method successfully extracts the leaves in most cases.
    }
	\label{fig:robust_segmentation4}
\end{figure*}

\subsection{Additional images}
In addition to the Leafsnap dataset, we also took many leaf images from our neighborhood. We intentionally used a poor camera on a mobile device, and placed the leaves against a slightly worse background with poor illumination. Note that the low-quality camera itself also generates a lot of noise. It turns out our method still succeeds in many cases, and provides segmentation results suitable for identification. 
See Figure~\ref{fig:additional1}-\ref{fig:additional5}.
It is worth mentioning that our multi-attempt K-Means segmentation and leaf component extraction work very well for these kinds of images.
With the combination of both, most noise caused by shadows and non-uniform backgrounds is eliminated.

\begin{figure*}
	\centering
	\begin{tabular}{ccc}
	\begin{subfigure}{0.2\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/749/original.jpg}
		\caption{Input}
	\end{subfigure}
	\begin{subfigure}{0.2\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/749/segmented.jpg}
		\caption{Segmented}
	\end{subfigure}
	\begin{subfigure}{0.2\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/749/destemed.jpg}
		\caption{Post-processed}
	\end{subfigure}
	\end{tabular}
	\caption{Segmentation result - additional image taken by us: 
        The leaf has thin features, and the image is dirty. Our method works fairly well.
    }
	\label{fig:additional1}
\end{figure*}

\begin{figure*}
	\centering
	\begin{tabular}{ccc}
	\begin{subfigure}{0.2\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/758/original.jpg}
		\caption{Input}
	\end{subfigure}
	\begin{subfigure}{0.2\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/758/segmented.jpg}
		\caption{Segmented}
	\end{subfigure}
	\begin{subfigure}{0.2\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/758/segmented3.jpg}
		\caption{Segmented}
	\end{subfigure}
	\begin{subfigure}{0.2\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/758/destemed.jpg}
		\caption{Post-processed}
	\end{subfigure}
	\end{tabular}
	\caption{Segmentation result - additional image taken by us: 
        While the image has severe shadow problem, our method works really well.
    }
	\label{fig:additional2}
\end{figure*}

\begin{figure*}
	\centering
	\begin{tabular}{ccc}
	\begin{subfigure}{0.2\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/752/original.jpg}
		\caption{Input}
	\end{subfigure}
	\begin{subfigure}{0.2\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/752/segmented.jpg}
		\caption{Segmented}
	\end{subfigure}
	\begin{subfigure}{0.2\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/752/destemed.jpg}
		\caption{Post-processed}
	\end{subfigure}
	\end{tabular}
	\caption{Segmentation result - additional image taken by us: 
        Our method is robust to gray scale background noise.
    }
	\label{fig:additional3}
\end{figure*}

\begin{figure*}
	\centering
	\begin{tabular}{ccc}
	\begin{subfigure}{0.2\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/742/original.jpg}
		\caption{Input}
	\end{subfigure}
	\begin{subfigure}{0.2\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/742/segmented.jpg}
		\caption{Segmented}
	\end{subfigure}
	\begin{subfigure}{0.2\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/742/destemed.jpg}
		\caption{Post-processed}
	\end{subfigure}
	\end{tabular}
	\caption{Segmentation result - additional image taken by us: 
        Our method is robust to gray scale background noise.
    }
	\label{fig:additional4}
\end{figure*}

\begin{figure*}
	\centering
	\begin{tabular}{ccc}
	\begin{subfigure}{0.19\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/746/original.jpg}
		\caption{Input}
	\end{subfigure}
	\begin{subfigure}{0.19\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/746/segmented.jpg}
		\caption{Segmented with 2 clusters}
	\end{subfigure}
	\begin{subfigure}{0.19\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/746/segmented3.jpg}
		\caption{Segmented with 3 clusters}
	\end{subfigure}
	\begin{subfigure}{0.19\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/746/segmented4.jpg}
		\caption{Segmented with 4 clusters}
	\end{subfigure}
	\begin{subfigure}{0.19\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/746/destemed.jpg}
		\caption{Post-processed}
	\end{subfigure}
	\end{tabular}
	\caption{Segmentation result - additional image taken by us: 
        Multi-attempt K-Means smartly stopped as soon as it found the right leaf component.
    }
	\label{fig:additional5}
\end{figure*}

\subsection{Identification Accuracy}
Figure~\ref{fig:accuracy} is a plot of our test accuracy for different maximum species match indices. 
The test is performed across all samples in the dataset using the leave-one-out approach. 
Table~\ref{tab:accuracy} lists some quantitative results. 
We believe that, if time permits, with further tuning of our parameters we could potentially achieve 
higher accuracy, but we feel that the current results are good enough given our 
goal. 
\begin{figure}
	\centering
    \includegraphics[width=\linewidth]{figure/accuracy.png}
	\caption{Test accuracy plot}
	\label{fig:accuracy}
\end{figure}

\begin{table}[!hbt]
	\centering
	\begin{tabular} {| l | l | l | l |}
		\hline
		Maximum species match index & 1 		& 5 		& 25 			\\ \hline
		Test accuracy 				& 76.0\%	& 91.1\%	& 98.4\%		\\ \hline
	\end{tabular}
	\caption{Test accuracy result}
	\label{tab:accuracy}
\end{table} 

\subsection{Limitation}
\subsubsection{Thin leaf with shadow}
One of the limitations we found is that, when the leaf is very thin and the image also has a shadow problem, our segmentation method can fail, as shown in Figure~\ref{fig:limitation1}.
This is actually caused by the way we detect noisy region on the image boundary.
We used the portion of the arc length of the region on the image boundary to decide whether a region is near the border; this criterion could be replaced by area. We believe that, with this change, because a thin leaf does not have much area, the segmented region would be recognized as a boundary region, and the multi-attempt K-Means could move on to the next attempt and finally find the correct component.

\begin{figure}
	\centering
	\begin{tabular}{ccc}
	\begin{subfigure}{0.3\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/limitation_pinus_wallichiana/original.jpg}
		\caption{Input}
	\end{subfigure}
	\begin{subfigure}{0.3\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/limitation_pinus_wallichiana/our.jpg}
		\caption{Our result}
	\end{subfigure}
	\begin{subfigure}{0.3\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/limitation_pinus_wallichiana/their.png}
		\caption{Kumar et al. 2012's result}
	\end{subfigure}
	\end{tabular}
	\caption{Limitation - Tiny leaf against severe shadow}
	\label{fig:limitation1}
\end{figure}

\subsubsection{Colorful background}
Another drawback of our method is that it will fail when the image has a colorful background, as shown in Figure~\ref{fig:limitation2}. This is mainly because our multi-attempt K-Means chooses the leaf component based on the saturation value at every step. Although this could affect at most 10\% of our dataset images, and affected far fewer than that in our experiments, we believe this is still a major drawback of our system.
This could be fixed by a better way to choose leaf component or exploring other feature representations.

\begin{figure}
	\centering
	\begin{tabular}{ccc}
	\begin{subfigure}{0.3\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/limitation_crataegus_laevigata/original.jpg}
		\caption{Input}
	\end{subfigure}
	\begin{subfigure}{0.3\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/limitation_crataegus_laevigata/our.jpg}
		\caption{Our result}
	\end{subfigure}
	\begin{subfigure}{0.3\linewidth}	
		\includegraphics[width=\linewidth]{figure/final/limitation_crataegus_laevigata/their.png}
		\caption{Kumar et al. 2012's result}
	\end{subfigure}
	\end{tabular}
	\caption{Limitation - Colorful background}
	\label{fig:limitation2}
\end{figure}

\section{Conclusions}
\label{sec:conclusions}
In this paper we have described an efficient and robust leaf segmentation and identification system. 
The work was motivated by our frustration from using the app ``Leafsnap'', which places too stringent 
requirement on the input leaf images. We have successfully overcome this issue and significantly 
reduced the rejection rate by implementing a more robust segmentation pipeline in the back-end. 
The framework of our system is taken from that of ``Leafsnap'', but with major improvements in 
segmentation method and post-processing pipelines, as illustrated in details in section~\ref{sec:technical}. 
We have presented both qualitative results to demonstrate the strength of our algorithm compared 
with that of \cite{Kumar2012}, and quantitative results to show the efficiency and accuracy of our algorithm.
In summary, we have realized our goal to build a system that is more user-friendly and practical 
for the task addressed by ``Leafsnap'', with a much higher acceptance rate for input images and
minimal impact on classification accuracy. However, our system also has drawbacks. For example, 
when the leaves are very thin or when the background is colored, our segmentation result may be less than ideal. 

\bibliographystyle{acmsiggraph}
\bibliography{reference}

\end{document}
