\documentclass{llncs}

\usepackage{algorithm} % [chapter] option removed: llncs has no chapters, so chapter-based numbering fails
\usepackage[noend]{algorithmic}
\usepackage{makeidx}
\usepackage{amsmath}

\begin{document}

%\frontmatter % removed: \frontmatter is not defined by the llncs class

\title{Attribute-Aware Pose Estimation}

\maketitle

\begin{abstract}
    In this paper, we extract clothing attributes from an input image to aid the pose estimation task.
    Pose estimation is an important task in computer vision.
    The state-of-the-art method is that of Yang and Ramanan~\cite{DBLP:conf/cvpr/YangR11}.
    However, their method does not consider clothing attributes such as sleeve, texture, and neckline.
    The information carried by these attributes helps us to improve the pose estimation task.
    We treat these attributes as latent variables and perform learning with the latent SVM algorithm.
    The contributions of our work are:
    (1) adding latent clothing attribute variables to improve the performance of pose estimation;
    (2) grouping person images by clothing attributes.
\end{abstract}

\section{Introduction}
\subsection{The pose estimation task}
We describe an approach that improves the performance of pose estimation and groups person images by using clothing attributes.

%\figure{1. an image for describe our motivation}

\subsection{Novelty and relations with existing works}


\subsection{Our approach}
We regard the clothing attributes as latent variables and then perform the learning process with the latent SVM algorithm.


\section{Model}
\subsection{Pose-related features}
    We train the appearance model and the deformation model following the method of Yang and Ramanan~\cite{DBLP:conf/cvpr/YangR11}.
    Let us write $I$ for an image, $l_i = (x, y)$ for the pixel location of part $i$, and $t_i$ for the mixture component of part $i$.
    We write $i \in \left\{1, \dots, K\right\}$, $l_i \in \left\{1, \dots, L\right\}$, and $t_i \in \left\{1, \dots, T\right\}$.
    
    \textbf{Co-occurrence model:} To score a configuration of parts, we first define a compatibility function for part types that factors into a sum of local and pairwise scores:
    \begin{equation}
        S(t) = \sum_{\substack{i \in V}} b_i^{t_i} + \sum_{\substack{ij \in E}} b_{ij}^{t_i, t_j}
    \end{equation}
    The parameter $b_i^{t_i}$ favors particular type assignments for part $i$, while the pairwise parameter $b_{ij}^{t_i, t_j}$ favors particular co-occurrences of part types.

    \textbf{Appearance and Deformation model:}
    \begin{equation}
        S(I, l, t) = S(t) + \sum_{\substack{i \in V}} \mathit{w}_i^{t_i} \cdot \phi(I, l_i) + \sum_{\substack{ij \in E}} \mathit{w}_{ij}^{t_i, t_j} \cdot \psi(l_i - l_j)
        \label{eq:pose}
    \end{equation}

\subsection{Clothing-related features}
    We write the clothing-related features as $\Psi(I, p_i, c_j)$, 
    which denotes the features extracted from $I$ under the constraints of part label $p_i$ and attribute label $c_j$.

    To describe the design of $\Psi(I, p_i, c_j)$, 
    we first convert the clothing label $c_j$ to an indicator vector $Id(c_j)$, 
    with only one dimension assigned the value ``1'' and all others ``0''.
    The low-level feature descriptors of the $j$-th clothing attribute $F_j(p_i)$ depend on two aspects:
    the corresponding human part(s) and the feature type.
    We use $F_j(p_i)$ to denote the features of the $j$-th clothing attribute under the part candidate $p_i$.
    Our clothing-related feature $\Psi(I, p_i, c_j)$ is represented as follows:
    \begin{equation}
        \Psi(I, p_i, c_j) = F_j(p_i) \otimes Id(c_j)
    \end{equation}
    where the ``$\otimes$'' operator represents the outer product of two vectors, and we map the resulting matrix to a vector in row order.

\subsection{Score Function}
    Here the clothing attributes have no label information, so the latent value set is composed of the clothing attributes.
    We encode the clothing attributes into the score function of Eq.~\eqref{eq:pose}.

    Now let us write the full score associated with a configuration of part types, part positions and clothing attributes.
    Consider a classifier that scores an example $x$ with a function of the form, 
    \begin{equation}
        f_\beta(x) = \max_{z \in Z(x)} \beta \cdot \Phi(x, z).
    \end{equation}
    Here $\beta$ is a vector of model parameters and $\mathit{z}$ are the latent values.
    The set $Z(x)$ defines the possible latent values for an example $x$.
    A binary label for $\mathit{x}$ can be obtained by thresholding its score.

    \begin{equation}
        \Phi(x, \mathit{z}) = \sum_{\substack{i \in V}} 1 + \sum_{\substack{ij \in E}} 1 + \sum_{\substack{i \in V}} \phi(I, l_i) + \sum_{\substack{ij \in E}} \psi(l_i - l_j) + \sum_{\substack{i \in V}} \sum_{\substack{j}} \Psi(I, p_i, c_j) 
    \end{equation}


\section{Inference} \footnote{Pictorial Structure + Attribute Foreach}   
    Inference corresponds to maximizing $\beta \cdot \Phi(x, z)$ over $z$.
    For the pose part, we use the pictorial structure method of Yang and Ramanan~\cite{DBLP:conf/cvpr/YangR11}.
    For the clothing attributes, we compute the score by iterating over each value of each clothing attribute.
    The whole procedure is given in Algorithm~\ref{alg:inference}.

    \begin{algorithm}
        \caption{\textbf{Procedure} Inference}
        \label{alg:inference}
        \textbf{Data:} An image $I$ to predict, model $\beta$ \\
        \textbf{Result:} pose part locations, clothing attribute type \\
        1. (l, t) = classical-pictorial-structure(I, $\beta$) \\
        2. \textbf{repeat} \\
        3. $\quad$ \textbf{for} j := 1 \textbf{to} num-attribute-values \textbf{do} \\
        4. $\quad$ $\quad$ compute $\Psi(I, p_i, c_j) = F_j(p_i) \otimes Id(c_j)$ \\
        5. $\quad$ $\quad$ score = score function of Eq.~\eqref{eq:pose} \\
        6. $\quad$ \textbf{end} \\
        7. $\quad$ attr = the attribute value with the maximum score \\
        8. $\quad$ (l, t, attr) = pictorial-structure(I, $\beta$, attr) \\
        9. $\quad$ all-score = (l, t, attr) * $\beta$  \\
        10. \textbf{until} all-score converges \\
    \end{algorithm}

% the classical method
%    \begin{algorithm}
%        \label{alg:ps}
%        \caption{\textbf{Procedure} Pictorial Structure} 
%        \textbf{Data:} An image I to predict, Model $\beta$ , Globel Attribute type\\
%        \textbf{Result:} pose part locations, clothing attribute type \\
%        1. \textbf{for} j := 1 \textbf{to} num-attribute-values \textbf{do} \\
%        2. $\quad$ (l, t) = classical-pictorial-structure(I, $\beta$) \\
%        3. $\quad$ compute $\Psi(I, p_i, c_j) = F_j(p_i) \otimes Id(c_j)$ \\
%        4. $\quad$ score = equation \ref{eq:pose}, score function \\
%        5. \textbf{end} \\
%        6. choose the attribute type related to the maxmium score \\
%    \end{algorithm}


\section{Learning}
    \subsection{Initial Beta}
    We train the appearance model and the deformation model separately on the BUFFY dataset, with 18 parts.
    We train the clothing attribute models (sleeve, neckline, texture) separately.

    \subsection{Update Beta}
    Learning corresponds to update parameter $\beta$.
    We use the Latent SVM algorithm to solve our problem.
    We update $\beta$ by minimizing the objective function,
    \begin{equation}
        L_D(\beta, Z) = \frac{1}{2} \|\beta\|^2 + C \sum_{i=1}^{n}\max(0, 1 - y_i f_{\beta}(x_i, Z(x_i)))
    \end{equation}
    where $D = (\langle x_1, y_1\rangle, \cdots, \langle x_n, y_n\rangle)$ is the set of labeled examples with $y_i \in \left\{1, -1\right\}$, 
    $\max(0, 1 - y_i f_{\beta}(x_i, Z(x_i)))$ is the standard hinge loss, and the constant $C$ controls the relative weight of the regularization term.

    In practice we minimize $L_D(\beta, Z)$ using a ``coordinate descent'' approach:
    1) \emph{Relabel positive examples:} optimize $L_D(\beta, Z)$ over $Z$ by selecting the highest-scoring latent value for each positive example, 
    $z_i = \operatorname*{arg\,max}_{z \in Z(x_i)} \beta \cdot \Phi(x_i, z)$.
    2) \emph{Optimize beta:} optimize $L_D(\beta, Z)$ over $\beta$ by solving the convex optimization problem defined by $L_{D(Z)}(\beta)$.
    We use the Pegasos SVM solver to implement stochastic gradient descent.

    \begin{algorithm}
    %\SetLine
    \caption{\textbf{Procedure} Train}
    \label{alg:train}
    \textbf{Data:} \\
        Positive examples $P = \{(I_1, (l, t, c)_1), \cdots, (I_n, (l, t, c)_n)\}$ \\
        Negative images $N = \{J_1, \cdots, J_m\}$  \\
        Initial model $\beta$ \\
    \textbf{Result:} Final model $\beta$  \\
    1. \textbf{repeat} \\
    2. $\quad$ $F$ := $\emptyset$;  \\
    3. $\quad$ \textbf{for} i := 1 \textbf{to} num-attribute-values \textbf{do} \\
    4. $\quad$ $\quad$ score := $\beta \cdot \Psi(x_i, z)$ \\
    5. $\quad$ $\quad$ add $\langle i, \mathrm{score}\rangle$ to $F$  \\
    6. $\quad$ \textbf{end} \\
    7. $\quad$ choose the attribute value corresponding to the maximum score; \\
    8. $\quad$ use the Pegasos SVM solver to minimize $L_D(\beta, Z)$; \\
    9. \textbf{until} $\beta$ converges
    \end{algorithm}

\section{Testing}
    Pose estimation: PCP, PCK, APK.
    Clothing attributes: precision.


\section{Implementation and Experiments}
%\figure{2. an image to visualize the hog model}
    
\section{Conclusion}
We recall our motivation and outline directions for future work.

\begin{thebibliography}{1}
    \bibitem {DBLP:conf/cvpr/YangR11}
    Yang, Y., Ramanan, D.:
    Articulated pose estimation with flexible mixtures-of-parts.
    In: CVPR, pp. 1385--1392 (2011)
\end{thebibliography}

\end{document}
