%\documentclass{acmsiggraph}                     % final
%\documentclass[annual]{acmsiggraph}
%\documentclass[annualconference]{acmsiggraph}  % final (annual conference)
\documentclass[preprint]{acmsiggraph}            % review
%\documentclass[widereview]{acmsiggraph}        % wide-spaced review
%\documentclass[preprint]{acmsiggraph}          % preprint

%% Uncomment one of the five lines above depending on where your paper is
%% in the conference process. ``review'' and ``widereview'' are for review
%% submission, ``preprint'' is for pre-publication, and ``final'' is for
%% the version to be printed. The ``final'' variant will accept the 
%% ``annualconference'' parameter, which changes the height of the space
%% left clear for the ACM copyright information.

%% The 'helvet' and 'times' packages define the typefaces used for
%% serif and sans serif type in this document. Computer Modern Roman 
%% is used for mathematics typesetting. The scale factor is set to .92
%% to bring the sans-serif type in line with the serif type.

\usepackage[scaled=.92]{helvet}
\usepackage{times}

\usepackage{multirow}
\usepackage{algorithm,algorithmic}
%\usepackage{multirow} % duplicate: multirow is already loaded above

%http://tex.stackexchange.com/questions/3445/latex-tables-how-do-i-make-bold-horizontal-lines-typically-hline
\makeatletter
%% \hlinewd{<dimen>}: an \hline replacement of configurable thickness for
%% use between tabular rows, e.g. \hlinewd{1.5pt} for a bold rule.
%% The \ifnum0=`}\fi construction is the standard kernel trick (cf. \@xhline)
%% to balance braces for TeX's scanner inside \noalign without emitting
%% anything; \futurelet peeks ahead so consecutive rules are spaced correctly.
\def\hlinewd#1{%
\noalign{\ifnum0=`}\fi\hrule \@height #1 %
\futurelet\reserved@a\@xhline} 
\makeatother

%% The 'graphicx' package allows for the inclusion of EPS figures.

\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{wrapfig}
\usepackage{overpic}

%% use this for zero \parindent and non-zero \parskip, intelligently.

\usepackage{parskip}

%% Optional: the 'caption' package provides a nicer-looking replacement
%% for the standard caption environment. With 'labelfont=bf,'textfont=it',
%% caption labels are bold and caption text is italic.

\usepackage[labelfont=bf,textfont=it]{caption}
%\usepackage{subfig}

\usepackage{subcaption}

%\newcommand{\mydraft}{true}
%% \mydraft is forwarded as the draft= key of every \includegraphics in the
%% figure macros below; set it to true to render placeholder boxes instead
%% of images for faster compiles while drafting.
\newcommand{\mydraft}{false}
%\interdisplaylinepenalty=2500

%% \magicfigure{small-w}{big-h}{pad}{big}{tl}{tr}{bl}{br}:
%% one large figure at height #2 on the left, with a 2x2 grid of small
%% figures (each at width #1) to its right, separated by horizontal pad #3.
% #1 is the width of the small figures
% #2 is the height of the big figure
% #3 is the horizontal pad
% #4 is the big figure; #5-#8 are the four small figures
\newcommand{\magicfigure}[8]{%
  % trailing %'s prevent the line breaks from injecting spurious spaces
  {\hfill\hbox{\includegraphics[height=#2,draft=\mydraft]{#4}\hspace{#3}\vbox to #2{\hbox{\includegraphics[width=#1,draft=\mydraft]{#5}\hspace{#3}\includegraphics[width=#1,draft=\mydraft]{#6}}\vfill\hbox{\includegraphics[width=#1,draft=\mydraft]{#7}\hspace{#3}\includegraphics[width=#1,draft=\mydraft]{#8}}}}\hfill}%
}

%% \supermagicfigure{small-w}{big-h}{big}{tl}{tm}{tr}{bl}{bm}{br}:
%% one large figure at height #2 on the left, with a 2x3 grid of small
%% figures (each at width #1) to its right; the horizontal padding is
%% hard-coded to .01in.
% #1 is the width of the small figures
% #2 is the height of the big figure
% #3 is the big figure
% #4-#9 are the six small figures
\newcommand{\supermagicfigure}[9]{%
  % trailing %'s prevent the line breaks from injecting spurious spaces
  {\hfill\hbox{\includegraphics[height=#2,draft=\mydraft]{#3}\hspace{.01in}\vbox to #2{\hbox{\includegraphics[width=#1,draft=\mydraft]{#4}\hspace{.01in}\includegraphics[width=#1,draft=\mydraft]{#5}\hspace{.01in}\includegraphics[width=#1,draft=\mydraft]{#6}}\vfill\hbox{\includegraphics[width=#1,draft=\mydraft]{#7}\hspace{.01in}\includegraphics[width=#1,draft=\mydraft]{#8}\hspace{.01in}\includegraphics[width=#1,draft=\mydraft]{#9}}}}\hfill}%
}

%% \fourfigure{w}{pad}{f1}{f2}{f3}{f4}: four figures in one row, each at
%% width #1, separated by horizontal pad #2.
% #1 is the width of one small figure
% #2 is the horizontal pad
% #3-#6 are the figures
\newcommand{\fourfigure}[6]{%
  % trailing %'s prevent the line breaks from injecting spurious spaces
  {\hbox{\includegraphics[width=#1,draft=\mydraft]{#3}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#4}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#5}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#6}}}%
}

%% \fourfigureline{w}{pad}{f1}{f2}{f3}{f4}: like \fourfigure, but with an
%% extra .1in of space in the middle, visually splitting the row into two
%% pairs of figures.
% #1 is the width of one small figure
% #2 is the horizontal pad
% #3-#6 are the figures
\newcommand{\fourfigureline}[6]{%
  % trailing %'s prevent the line breaks from injecting spurious spaces
  {\hbox{\includegraphics[width=#1,draft=\mydraft]{#3}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#4}\hspace{#2}\hspace{.1in}\includegraphics[width=#1,draft=\mydraft]{#5}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#6}}}%
}


%% \fivefigure{w}{pad}{f1}{f2}{f3}{f4}{f5}: five figures in one row, each
%% at width #1, separated by horizontal pad #2.
% #1 is the width of one small figure
% #2 is the horizontal pad
% #3-#7 are the figures
\newcommand{\fivefigure}[7]{%
  % trailing %'s prevent the line breaks from injecting spurious spaces
  {\hbox{\includegraphics[width=#1,draft=\mydraft]{#3}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#4}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#5}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#6}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#7}}}%
}

%% \sixfigure{w}{pad}{f1}...{f6}: six figures in one row, each at width #1,
%% separated by horizontal pad #2.
% #1 is the width of one small figure
% #2 is the horizontal pad
% #3-#8 are the figures
\newcommand{\sixfigure}[8]{%
  % trailing %'s prevent the line breaks from injecting spurious spaces
  {\hbox{\includegraphics[width=#1,draft=\mydraft]{#3}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#4}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#5}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#6}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#7}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#8}}}%
}


%% \threefigure{w}{pad}{f1}{f2}{f3}: three figures in one row, each at
%% width #1, separated by horizontal pad #2; the row starts with a fixed
%% .1in indent.
% #1 is the width of one small figure
% #2 is the horizontal pad
% #3-#5 are the figures
\newcommand{\threefigure}[5]{%
  % trailing %'s prevent the line breaks from injecting spurious spaces
  {\hbox{\hspace{.1in}\includegraphics[width=#1,draft=\mydraft]{#3}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#4}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#5}}}%
}

%% \twofigure{w}{pad}{f1}{f2}: two figures side by side, each at width #1,
%% separated by horizontal pad #2.
% #1 is the width of one small figure
% #2 is the horizontal pad
% #3-#4 are the figures
\newcommand{\twofigure}[4]{%
  % trailing %'s prevent the line breaks from injecting spurious spaces
  {\hbox{\includegraphics[width=#1,draft=\mydraft]{#3}\hspace{#2}\includegraphics[width=#1,draft=\mydraft]{#4}}}%
}

\newcommand{\myname}[1]{\hspace{-.15in}#1}

%% If you are submitting a paper to the annual conference, please replace 
%% the value ``0'' below with the numeric value of your OnlineID. 
%% If you are not submitting this paper to the annual conference, 
%% you may safely leave it at ``0'' -- it will not be included in the output.

%\TOGonlineid{46}
\TOGvolume{0}
\TOGnumber{0}
\TOGarticleDOI{1111111.2222222}
\TOGprojectURL{}
\TOGvideoURL{}
\TOGdataURL{}
\TOGcodeURL{}

%% Paper title.

%no abbreviations
\title{Leaf segmentation and recognition}

%% Author and Affiliation (single author).

%%\author{Roy G. Biv\thanks{e-mail: roy.g.biv@aol.com}\\Allied Widgets Research}

%% Author and Affiliation (multiple authors).

%\author{Roy G. Biv\thanks{e-mail: roy.g.biv@aol.com}\\ Starbucks Research %
%\and Ed Grimley\thanks{e-mail:ed.grimley@aol.com}\\Nigel Mansell\thanks{nigelf1@msn.com}\\ Grimley Widgets, Inc. %
%\and Martha Stewart\thanks{e-mail:martha.stewart@marthastewart.com}\\ Martha Stewart Enterprises \\ Microsoft Research}

\author{
    Ning Jin\thanks{e-mail: njin19@stanford.edu}\\ Stanford University \and 
    Wenlong Lu\thanks{e-mail: wenlongl@stanford.edu}\\ Stanford University
}

%\author{Matthew Cong\thanks{e-mail: \{mdcong,mikebao,rfedkiw\}@stanford.edu}\\Stanford University\\Industrial Light + Magic%
%\and \myname{Michael Bao$^*$}\\Stanford University
%\and \myname{Ronald Fedkiw$^*$}\\ \myname{Stanford University}\\ \myname{Industrial Light + Magic}}
%
\pdfauthor{Ning Jin, Wenlong Lu}

%% Keywords that describe your work.

\keywords{}

%%%%%% START OF THE PAPER %%%%%%

\begin{document}


%% The ``\maketitle'' command must be the first command after the
%% ``\begin{document}'' command. It prepares and prints the title block.

\maketitle

%% ACM Computing Review (CR) categories. 
%% See <http://www.acm.org/class/1998/> for details.
%% The ``\CRcat'' command takes four arguments.

%% The ``\keywordlist'' command prints out the keywords.

\copyrightspace

\section{Introduction}
\label{sec:introduction}

\begin{figure}
\centering
\begin{tabular}{c c}
    \begin{subfigure}{.15\textwidth}
        \centering
        \includegraphics[width=.9\textwidth]{figure/13292231172389.jpg}
        \caption{}
        \label{fig:sfig1}
    \end{subfigure}%
    &
    \begin{subfigure}{.25\textwidth}
        \centering
        \includegraphics[width=.9\textwidth]{figure/13292231171458.jpg}
        \caption{}
        \label{fig:sfig2}
    \end{subfigure}%
    \\
    \begin{subfigure}{.15\textwidth}
        \centering
        \includegraphics[width=.9\textwidth]{figure/13292231170979.jpg}
        \caption{}
        \label{fig:sfig3}
    \end{subfigure}%
    &
    \begin{subfigure}{.25\textwidth}
        \centering
        \includegraphics[width=.9\textwidth]{figure/13292231170455.jpg}
        \caption{}
        \label{fig:sfig4}
    \end{subfigure}%
\end{tabular}
    \caption{Leaves identified to be the same species.}
    \label{fig:species}
\end{figure}

Our project is inspired by the mobile application ``Leafsnap'', 
which labels plant species given photos of leaves against an untextured light-colored background. 
This is an example of automatic plant recognition, a growing research area in computer vision. 
These scientific methods are developed to assist botanists in their field expeditions,
but they could also be built as tools to help the general audience in learning species information, 
such as in the case of this app. Along similar lines, researchers have worked on flower recognition~\cite{Nilsback2008}, bird recognition~\cite{Branson2010}, etc.

While the ``Leafsnap'' app is able to output correct matches with high accuracy, it heavily relies on the requirement of 
placing a flat leaf on an essentially white paper background, which we found to be too stringent and not very user-friendly 
for amateurs like us. Ideally, we would like to develop a more robust tool that is able to segment a leaf against a potentially noisy background, and attach the most likely labels from our database. 
    
\section{Technical background}
\label{sec:technical}

\subsection{System Overview}

\cite{Kumar2012} describes in detail how ``Leafsnap'' works. First, they trained the recognition system with data of 184 tree species in the Northeastern United States, each with hundreds of images (both lab photos and field photos with lighting and color variation). Then, given a user-loaded photo, their pipeline is composed of four steps: 
\begin{itemize}
\item \textbf{Classify} whether the input photo is a valid leaf; 
\item \textbf{Segment} the leaf part to get a binary mask; 
\item \textbf{Extract} curvature features at different scales; and 
\item \textbf{Compare} the features with dataset and find nearest neighbors. 
\end{itemize}

We will generally follow their system, and try to improve or replace the segmentation algorithm to achieve a more user-friendly solution. 
In case we cannot achieve a satisfactory segmentation method for leaves on a noisy background, we might also consider incorporating more features (including some interior features) to help identify the species. 

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% additions 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
To build our system, we decide to employ the OpenCV library, which is appropriate for our purpose since it has many image processing tools as well as baseline implementation of commonly used vision algorithms. 

\subsection{Segmentation}
\label{sec:segmentation}

\begin{figure}
    \begin{subfigure}{.22\textwidth}
        \centering
        \includegraphics[width=.9\textwidth]{figure/13292231172389.jpg}
        \caption{Leaf flattened on a white paper}
        \label{fig:sfig5}
    \end{subfigure}%
    \begin{subfigure}{.22\textwidth}
        \centering
        \includegraphics[width=.9\textwidth]{figure/13292231172389.png}
        \caption{Segmentation result}
        \label{fig:sfig6}
    \end{subfigure}%
    \caption{A segmentation result from Kumar et al. 2012}
    \label{fig:kumarseg}
\end{figure}

In \cite{Kumar2012}, they argued that shape should be the only cue in leaf recognition, 
and other factors like color, pattern, flowers are more noisy than useful. 
Therefore, their segmentation only focuses on extracting the boundary of the leaf, 
generating a binary mask over the photo. 
The main segmentation method is expectation-maximization (EM) based on color in HSV space, 
followed by post-processing steps that remove false positive regions and the leaf stem. 
Their method works well in most cases, but may fail due to the presence of shadows and specular highlights. 

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% additions 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
We have implemented their color-based segmentation method using EM. 
Expectation-maximization is an iterative method for solving maximum likelihood estimates of parameters in statistical models. 
It consists of two steps: the expectation (E) step, which computes the expectation of the log-likelihood using the current parameter estimates, and the maximization (M) step, which computes parameters that maximize the expected log-likelihood computed in the E step. 
With respect to image segmentation, EM is often used on a Gaussian mixture model, where each segment or cluster is described by a Gaussian distribution. The parameters thus consist of 1) the weight \(\pi_k\) of each mixture \(k\), and 2) the mean \(\mu_k\) and covariance \(\Sigma_k\) of each Gaussian mixture \(k\). Given feature vector \(x_i\), the probability that it is drawn from this Gaussian mixture model with \(M\) clusters is then 
\begin{equation}
	p[x_i | \Theta = (\pi, \mu, \Sigma)] = \sum_{k=1}^{M} \pi_k p_k[x_i | \mu_k, \Sigma_k], 
\end{equation}
where \(p_k\) represents the probability density function of the normal distribution.
For \(N\) samples, the log likelihood function is thus \(L(x | \Theta) = \sum_{i=1}^N \log p[x_i | \Theta]\). 
For the expectation step, we compute the probability that each sample \(x_i\) belongs to each cluster \(k\): 
\begin{equation}
	p_{i,k} = \frac{\pi_k p_k}{\sum_{l=1}^M \pi_l p_l}
\end{equation}
For the maximization step, we update the parameters that maximize the log likelihood function (\(k=1,...,M\)):
\begin{align}
	\pi_k &= \frac{1}{N} \sum_{i=1}^N p_{i,k}\\
	\mu_k &= \frac{\sum_{i=1}^N x_ip_{i,k}}{\sum_{i=1}^N p_{i,k}}\\
	\Sigma_k &= \frac{\sum_{i=1}^N (x_i-\mu_k)^T(x_i-\mu_k)p_{i,k} }{\sum_{i=1}^N p_{i,k}}
\end{align}
When the algorithm converges, i.e., when the above estimates change less than some threshold in one iteration, we arrive at the model parameters that maximize the log likelihood. 

To realize this in our system, first we prepare the input by converting the given image to HSV space and extracting the saturation 
and value as features. 
Then we call the OpenCV EM library to run this iterative clustering algorithm. 
Figure~\ref{fig:segmentation1} and Figure~\ref{fig:segmentation2} show results for two leaf samples from field images.
For the baseline implementation, we do not pass in fine-tuned initialization values, unlike the paper which uses 
pre-computed expected values for mean and scaled identity matrix for covariance. 
Another difference is that we do not re-balance the pixel weights such that the sum of the leaf pixels and non-leaf pixels are equal.
In addition, we do not perform post-processing to remove stems or use connected components to get rid of false positives. Motivated by some of the less successful segmentation results, we might attempt different features, for example an alternative color space, gradient, position, etc. 

\begin{figure}[!bth]
	\centering
	\begin{subfigure}{0.45\linewidth}	
		\includegraphics[width=\linewidth]{figure/1/original.jpg}
		\caption{Original leaf image}
	\end{subfigure}
	\begin{subfigure}{0.45\linewidth}	
		\includegraphics[width=\linewidth]{figure/1/segmentation.jpg}
		\caption{Segmentation result}
	\end{subfigure}
	\caption{Segmentation result for one leaf sample using EM}
	\label{fig:segmentation1}
\end{figure}
\begin{figure}[!bth]
	\centering
	\begin{subfigure}{0.45\linewidth}	
		\includegraphics[width=\linewidth]{figure/2/original.jpg}
		\caption{Original leaf image}
	\end{subfigure}
	\begin{subfigure}{0.45\linewidth}	
		\includegraphics[width=\linewidth]{figure/2/segmentation.jpg}
		\caption{Segmentation result}
	\end{subfigure}
	\caption{Segmentation result for another leaf sample using EM}
	\label{fig:segmentation2}
\end{figure}

Apart from EM, we have also tried the simple K-means segmentation, again using OpenCV library implementation. 
The result for the first leaf sample is shown below in Figure~\ref{fig:kmeans1}, and it works better than the result achieved in 
EM in Figure~\ref{fig:segmentation1}. However, to fully evaluate the performance of these segmentation methods, we 
would need to run more trials on the dataset images.


Other works have sought different approaches, which we could experiment with. 
For example, \cite{Valliammal2012b} proposed a method that combines non-linear K-means clustering with Sobel edge detection in leaf segmentation.
\cite{Teng2009a} attempts to recover leaf shape from their 3D position by taking multiple images from different viewpoints. 
\cite{cerutti2011a} presents a system based on parametric polygon for leaf segmentation and shape estimation. 

\begin{figure}[!bth]
	\centering
	\begin{tabular}{c c}
	\begin{subfigure}{0.45\linewidth}	
		\includegraphics[width=\linewidth]{figure/1_kmeans/original.jpg}
		\caption{Original leaf image}
	\end{subfigure}
	&
	\begin{subfigure}{0.45\linewidth}	
		\includegraphics[width=\linewidth]{figure/1_kmeans/segmentation.jpg}
		\caption{Segmentation result using K-means}
	\end{subfigure}
	\\
	\begin{subfigure}{0.45\linewidth}	
		\includegraphics[width=\linewidth]{figure/1/segmentation2.jpg}
		\caption{Leaf extracted from EM segmentation}
	\end{subfigure}
	&
	\begin{subfigure}{0.45\linewidth}	
		\includegraphics[width=\linewidth]{figure/1_kmeans/segmentation2.jpg}
		\caption{Leaf extracted from K-means segmentation}
	\end{subfigure}
	\end{tabular}
	\caption{Comparison of segmentation results from EM and K-means}
	\label{fig:kmeans1}
\end{figure}

\subsection{Identification}

Having produced the segmentation mask, \cite{Kumar2012} generate Histograms of Curvature over Scale (HoCS) feature to incorporate variations in overall leaf shape as well as fine-scale features like serrations. In order to find the species label, they run a nearest neighbor search on the input photo, using histogram intersection as distance. 

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% additions 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
We have followed the paper's description to implement the HoCS features. The steps are straightforward and listed below:
\begin{enumerate}
	\item Extract the leaf boundary from the binary segmentation mask
	\item Generate circular templates over different scales
	\item Slide these templates over the leaf boundary and compute a histogram of curvatures, with curvature defined in two manners:
		\begin{itemize}
			\item arc length of circle inside the boundary
			\item area of circle inside the boundary
		\end{itemize}
\end{enumerate}
\cite{Pottmann2009} offers a clear illustration of the curvatures mentioned above, see Figure~\ref{fig:curvature}.
\begin{figure}[!hbt]
    \centering
	\includegraphics[width=\linewidth]{figure/area_arclength_illustration.png}
    \caption{Curvature definition: area of circle inside domain and arclength of circle inside domain}
    \label{fig:curvature}
\end{figure}

Figure~\ref{fig:histogram1} and Figure~\ref{fig:histogram2} show the HoCS output for the sample images referenced 
in Subsection~\ref{sec:segmentation} above.

\begin{figure}
	\centering
	\begin{tabular}{c}
	\begin{subfigure}{0.8\linewidth}	
		\includegraphics[width=\linewidth]{figure/1/curvature_arclength_5.png}
		\caption{Histogram of small-scale curvature (arclength)}
	\end{subfigure}
	\\
	\begin{subfigure}{0.8\linewidth}	
		\includegraphics[width=\linewidth]{figure/1/curvature_area_5.png}
		\caption{Histogram of small-scale curvature (area)}
	\end{subfigure}
	\\
	\begin{subfigure}{0.8\linewidth}	
		\includegraphics[width=\linewidth]{figure/1/curvature_arclength_20.png}
		\caption{Histogram of large-scale curvature (arclength)}
	\end{subfigure}
	\\
	\begin{subfigure}{0.8\linewidth}	
		\includegraphics[width=\linewidth]{figure/1/curvature_area_20.png}
		\caption{Histogram of large-scale curvature (area)}
	\end{subfigure}
	\end{tabular}
	\caption{HoCS for one leaf sample}
	\label{fig:histogram1}
\end{figure}

\begin{figure}
	\centering
	\begin{tabular}{c}
	\begin{subfigure}{0.8\linewidth}	
		\includegraphics[width=\linewidth]{figure/2/curvature_arclength_5.png}
		\caption{Histogram of small-scale curvature (arclength)}
	\end{subfigure}
	\\
	\begin{subfigure}{0.8\linewidth}	
		\includegraphics[width=\linewidth]{figure/2/curvature_area_5.png}
		\caption{Histogram of small-scale curvature (area)}
	\end{subfigure}
	\\
	\begin{subfigure}{0.8\linewidth}	
		\includegraphics[width=\linewidth]{figure/2/curvature_arclength_20.png}
		\caption{Histogram of large-scale curvature (arclength)}
	\end{subfigure}
	\\
	\begin{subfigure}{0.8\linewidth}	
		\includegraphics[width=\linewidth]{figure/2/curvature_area_20.png}
		\caption{Histogram of large-scale curvature (area)}
	\end{subfigure}
	\end{tabular}
	\caption{HoCS for another leaf sample}
	\label{fig:histogram2}
\end{figure}


In addition, as a preliminary idea, when we are not able to achieve the same level of segmentation quality with noisy input images and our new segmentation method, we may implement 2D registration~\cite{fitzgibbon2003robust} to find the orientation of the leaf, and then make use of more features based on orientation to help identification.

\section{Milestones achieved}
\label{sec:milestones}
\begin{enumerate}
\item
    Implement Color-based Segmentation using EM
\item
    Implement Color-based Segmentation using K-means
\item
    Generate Histograms of Curvature-ArcLength over Scale
\item
    Generate Histograms of Curvature-Area over Scale
\end{enumerate}


\section{Milestones remaining}
\label{sec:milestones-remaining}
Goals:
\begin{enumerate}
\item
    Implement classification
\item
    Build the system to integrate the algorithms, and process the dataset
\item
    Investigate Better Segmentation Solution
\item
    Develop User Interface
\end{enumerate}

\bibliographystyle{acmsiggraph}
\bibliography{reference}

\end{document}
