\documentclass{acm_proc_article-sp}
%\documentclass[times, 10pt,twocolumns]{article}
\usepackage{times}
%\usepackage[english]{algorithm2e}
\usepackage{algorithm}
\usepackage{algpseudocode}
%\usepackage[named]{algo}
%\algref{<algorithm>}{<line>}
\newtheorem{theorem}{Theorem}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{Observation}[theorem]{Observation}
\newcommand*{\candidate}{\mathcal{C}} % \mathcal{} replaces the obsolete \cal (l2tabu)
\newcommand{\comment}[1]{}            % swallows its argument; \newcommand errors on accidental redefinition, unlike \def
\usepackage{graphicx}
\input{psfig} % NOTE(review): psfig is obsolete; graphicx (loaded above) supersedes it -- remove this once no \psfig calls remain in the \input'd sections


\pagestyle{empty}

\begin{document}

\title{Finding Semantics in Time Series}

%\numberofauthors{1}
% \author{
% \alignauthor Peng Wang $^\dag$\hspace{1cm}Haixun Wang $^\ddag$\hspace{1cm}Wei Wang $^\dag$\\
% \ \\
%        \affaddr{\hspace{1.35cm}$^\dag$Fudan University\hspace{4.3cm}$^\ddag$IBM T. J. Watson Research Center\hspace{.5cm}}\\
%        \affaddr{\hspace{1cm}Shanghai, China\hspace{5cm}Hawthorne, NY 10533, USA}\\
%        \affaddr{\{pengwang5,weiwang1\}@fudan.edu.cn\hspace{2cm} \mbox{\hspace{1cm}}haixun@us.ibm.com\mbox{\hspace{1cm}}}
% }


\maketitle \thispagestyle{empty} \begin{abstract}
  Mining time series has many important applications and poses many
  interesting challenges. A great deal of work has been proposed to deal with
  time series data, including
representations of time series, time series
prediction, anomaly detection,
classification, and clustering. Different types of patterns have been proposed
to represent time series approximately, but their goal is to index time series
to speed up similarity-based subsequence matching. In this paper, we claim that
modeling semantics in time series not only helps to
provide a better understanding of time series, but also allows for more accurate
forecasting and high-level correlation detection.
  To this end, we propose a pattern-based hidden Markov model (PHMM).
  An iterative refinement approach is proposed to identify latent states and learn
  the model. Our approach uses feedback from latent states to refine the
  segmentation and clustering process, which in turn allows us to
  capture the latent states more accurately. Three pruning strategies are
  proposed to speed up the refinement process, which makes our
  approach efficient. With PHMM, the obtained patterns and the
  temporal relations between them help to make more advanced predictions
  and to detect more general correlations between different time series.
  Empirical results on real datasets demonstrate the feasibility and
effectiveness of the proposed approach.
\end{abstract}

\input{introduction3}
\input{approach2}
\input{application}
\input{Experiment}
\input{relatedwork}
%\comment{
\section{Conclusion}
\label{sec:conclusion} In this paper, we propose a novel approach to
mining Markov models for time series forecasting. The most
challenging task in Markov modeling is to identify the latent
states. Traditionally, this task is achieved by clustering. However,
for time series, it is not clear what the basic object for clustering
should be, and we show in this paper that the choice of such objects
is crucial for the effectiveness of Markov modeling. We present an
iterative approach to learn the model. In each iteration,
context-based line segmentation, clustering, and
aggregation steps are performed to learn the latent states of the
Markov model, and the model learned in the previous round in turn
guides the learning process of the next round. The forecasting-oriented
clustering and composite-state approaches are proposed to improve the
predictive power of the latent states. We show that the technique is
very effective in modeling time series and hence achieves good accuracy
in time series forecasting. In future work, we plan to extend
the approach to solve specific problems in system management and
finance.
\bibliographystyle{plain}
\bibliography{haixun}

\end{document}

% \appendix
% %Appendix A
% %\section{Bottom-up Segmentation Algorithm}
% \section{Segmentation Algorithm}
% \label{sec:seg}
% %Table 1 contains the notations used in this section.

% %\begin{table*}[!htb]
% %\centering
% %\begin{tabular}{|c|c|}\hline
% %    T     & A time series in the form $t_1,t_2,\cdots,t_n$\\
% %\hline
% %T[a:b]    & The subsection of T from a to b, $t_a,t_{a+1},\cdots,t_b$\\
% %\hline Seg\_TS & A piecewise linear approximation of a time series
% %of length $n$ with $K$ segments. \\
% % & Individual segments can be
% %addressed with $Seg\_TS(i)$.\\
% %\hline create\_segment(T)     &  A function which takes in a time
% %series and returns a linear\\
% % & segment approximation of it.\\
% %\hline calculate\_error(T)     &  A function which takes in a time
% %series and returns the approximation
% %error\\
% % & of the linear segment approximation of it.\\
% %\hline
% %concat(Seg\_TS,seg)    &  add a new segment to the set Seg\_TS\\
% %\hline
% %\end{tabular}
% %\caption{The notations}
% %\end{table*}





% %\subsection{Sliding window algorithm}

% %\begin{algorithm}{Sliding\_Window\_Seg}{
% %    \label{algo:swg}
% %\qinput $T$: time series; \qinput $max\_error$
% %    }
% %anchor=1; \\
% %\qwhile not finished segmenting time series \\
% %    i=2;\\
% %    \qwhile $calculate\_error(T[anchor: anchor + i ]) < max\_error$
% %    \\
% %    i=i+1;
% %    \qelihw\\
% %    Seg\_TS = concat(Seg\_TS, create\_segment(T[anchor: anchor +
% %(i-1)]);\\
% %    anchor = anchor + i;\qelihw
% %\end{algorithm}

% We use $Y[i:j]$ to denote the subsequence $y_i,\cdots. y_j$, and
% $Seg[i:j]$ to denote the least-square-error line for $Y[i:j]$.
% $Seg_i$ is the $i^{th}$ segment line.

% \begin{algorithm}{Bottom\_up\_Seg}{
%     \label{algo:bu}
% \qinput $Y=(y_1,y_2,\cdots,y_n)$: time series; \qinput $max\_error$;
% \qoutput $Seg_1,Seg_2,Seg_m$, m is the the number of segment lines }
% \qfor $i=1:\lfloor \frac{n}{2}\rfloor$\\
%     compute $Seg[2i-1:2i]$ and $Seg_i=Seg[2i-1:2i]$;\qrof\\
% %    Seg\_Ts=concat(Seg\_TS,create\_segment(T[i:i+1]));\qrof\\
% \qfor $i=1:\lfloor \frac{n}{2}\rfloor-1$\\
%     merge\_cost(i)=square error of merge $Seg_i$ and $Seg_{i+1}$;\qrof\\
% \qwhile min(merge\_cost) $<$ max\_error\\
%     $j$=min(merge\_cost);\\
%     merge $Seg_{j}$ and $Seg_{j+1}$ to a new segment line;\\
%     $Seg_{j}$=new segment line;\\
%     delete $Seg_{j+1}$;\\
% %    merge\_cost(index)=calculate\_error([merge(Seg\_TS(index),Seg\_TS(index+1))]);\\
%     update merge\_cost(j-1) and merge\_cost(j);\qelihw
% \end{algorithm}

%\subsubsection{Query probability computing}
%For queried value or pattern, we can compute the probability of the
%query belonging to each state and the occurrence time point.

%\subsubsection{State pattern index building}
%To speed up the comparing between the current subsequence and the
%patterns of all states, we need to build an index for the patterns
%of all states.


%\subsection{Step 3: Continuous query processing}
%We monitor the time series, and compute the probability of the
%current subsequence belonging to each state. Combining the above
%probability with the transition probability and the probability of
%the query belonging to each state, we can make prediction.


%\section{semi-markov segmental markov model}
%This model can allow us to specify shape of the pattern within each
%state, the mean and variance of the duration length for each segment
%and the ordering of the segments.

%Semi-markov model is used to represent the duration time of states.


%The output probability distribution of state i will be of the form
%\[p(y_{m+1}y_{m+2}\cdots y_{m+d_i}|s_i)=p(d_i|s_i)p(\theta _i|s_i)\prod _{t=m+1}^{m+d_i} {p(y_t|f_i(\theta _i,t))}\]
