\documentclass[3p,twocolumn]{elsarticle}
\usepackage{amsmath, amssymb, amsfonts, amsthm}
\theoremstyle{plain}
\usepackage{lineno,hyperref}

\newtheorem*{theorem}{Theorem}
\newtheorem*{example}{Example}
\newtheorem{lemma}{Lemma}
\newtheorem*{conjecture}{Conjecture}
\newcommand{\LCE}{\mathit{LCE}}

\modulolinenumbers[5]
\journal{Information Processing Letters}
\bibliographystyle{elsart-num-sort}%\bibliographystyle{elsarticle-num}

\begin{document}

\begin{frontmatter}

\title{Computing Runs on a General Alphabet}
%\tnotetext[mytitlenote]{Fully documented templates are available in the elsarticle package on \href{http://www.ctan.org/tex-archive/macros/latex/contrib/elsarticle}{CTAN}.}

%% Group authors per affiliation:
\author{Dmitry Kosolobov} %\fnref{myfootnote}}
\address{Ural Federal University, Ekaterinburg, Russia}
%\fntext[myfootnote]{Since 1880.}

\begin{abstract}
We describe a RAM algorithm computing all runs (=maximal repetitions) of a given string of length $n$ over a general ordered alphabet in $O(n\log^{\frac{2}3} n)$ time and linear space. Our algorithm outperforms all known solutions working in $\Theta(n\log\sigma)$ time provided $\sigma = n^{\Omega(1)}$, where $\sigma$ is the number of distinct letters in the input string. We conjecture that there exists a linear time RAM algorithm finding all runs.
\end{abstract}


\begin{keyword}
runs \sep general alphabet \sep maximal repetitions \sep linear time \sep  repetitions
\end{keyword}

\end{frontmatter}

\linenumbers

\section{Introduction}

Repetitions of strings are fundamental objects in both stringology and combinatorics on words. In some sense the notion of \emph{run}, introduced by Main~\cite{Main}, allows one to grasp the whole repetitive structure of a given string in a relatively simple form. Recall that a run of a string is a substring that cannot be extended to the left or to the right without increasing its minimal period and whose minimal period is at most half of its length. In~\cite{KolpakovKucherov} Kolpakov and Kucherov showed that any string of length $n$ contains $O(n)$ runs and proposed an algorithm computing all runs in linear time on an integer alphabet $\{0,1,\ldots, n^{O(1)}\}$ and $O(n\log\sigma)$ time on a general ordered alphabet, where $\sigma$ is the number of distinct letters in the input string. Recently, Bannai et al. described another interesting algorithm computing all runs in $O(n\log\sigma)$ time~\cite{BannaiIInenagaNakashimaTakedaTsuruta}. Modifying the approach of \cite{BannaiIInenagaNakashimaTakedaTsuruta}, we prove the following theorem.
\begin{theorem}
For a general ordered alphabet, there is an algorithm that computes all runs in a string of length $n$ in $O(n\log^{\frac{2}{3}} n)$ time and linear space.
\end{theorem}
This is in contrast to the result of Main and Lorentz \cite{MainLorentz} who proved that any algorithm deciding whether a string over a general \emph{unordered} alphabet has at least one run requires $\Omega(n\log n)$ comparisons in the worst case.

Our algorithm outperforms all known solutions provided $\sigma = n^{\Omega(1)}$. It should be noted that the algorithm of Kolpakov and Kucherov can hardly be improved in a similar way since it strongly relies on a structure (namely, the Lempel--Ziv decomposition) that cannot be computed in $o(n\log\sigma)$ time on a general ordered alphabet (see \cite{Kosolobov}).

Based on some theoretical observations of \cite{Kosolobov}, we conjecture that one can further improve our result.
\begin{conjecture}
For a general ordered alphabet, there is a linear time algorithm computing all runs.
\end{conjecture}


\section{Preliminaries}

A \emph{string $w$ of length $n$} over an alphabet $\Sigma$ is a map $w\colon \{1,2,\ldots,n\} \to \Sigma$, where $n$ is referred to as the length of $w$, denoted by $|w|$. We write $w[i]$ for the $i$th letter of $w$ and $w[i..j]$ for $w[i]w[i{+}1]\ldots w[j]$. A string $u$ is a \emph{substring} (or a \emph{factor}) of $w$ if $u=w[i..j]$ for some $i$ and $j$. The pair $(i,j)$ is not necessarily unique; we say that $i$ specifies an \emph{occurrence} of $u$ in $w$. A string can have many occurrences in another string. A substring $w[1..j]$ [respectively, $w[i..n]$] is a \emph{prefix} [respectively, \emph{suffix}] of $w$. An integer $p$ is a \emph{period} of $w$ if $0 < p < |w|$ and $w[i] = w[i{+}p]$ for all $i=1,\ldots,|w|{-}p$. For integers $i$ and $j$, the set $\{k\in \mathbb{Z} \colon i \le k \le j\}$ (possibly empty) is denoted by $[i..j]$. Denote $[i..j) = [i..j{-}1]$ and $(i..j] = [i{+}1..j]$.

A \emph{run} of a string $w$ is a substring $w[i..j]$ whose minimal period is at most half of the length of $w[i..j]$ and such that both substrings $w[i{-}1..j]$ and $w[i..j{+}1]$, if defined, have strictly greater minimal periods than $w[i..j]$.

Hereafter, $w$ denotes the input string of length $n$.

In the \emph{longest common extension ($\LCE$)} problem one has the queries $\LCE(i,j)$ returning for given positions $i$ and $j$ of $w$ the length of the longest common prefix of the suffixes $w[i..n]$ and $w[j..n]$. It is well known that one can perform the $\LCE$ queries in constant time after a preprocessing of $w$ requiring $O(n\log\sigma)$ time, where $\sigma$ is the number of distinct letters in $w$ (e.g., see \cite{HarelTarjan}). It appears that the time consumed by the $\LCE$ queries is dominating in the algorithm of \cite{BannaiIInenagaNakashimaTakedaTsuruta}; namely, one can easily prove the following lemma.
\begin{lemma}[{see \cite[Alg. 1 and Sect. 4.2]{BannaiIInenagaNakashimaTakedaTsuruta}}]
Suppose we can compute any sequence of $O(n)$ $\LCE$ queries on $w$ in $O(f(n))$ time for some function $f(n)$; then we can find all runs of $w$ in $O(n + f(n))$ time.\label{LCEtoRuns}
\end{lemma}

In what follows we describe an algorithm that computes $O(n)$ $\LCE$ queries in $O(n\log^{\frac{2}3} n)$ time and thus prove the Theorem using Lemma~\ref{LCEtoRuns}. The key notion in our construction is a \emph{difference cover}. Let $k\in \mathbb{N}$. A set $D \subset [0..k)$ is called a difference cover of $[0..k)$ if for any $x \in [0..k)$, there exist $y,z \in D$ such that $y - z \equiv x\pmod{k}$. Clearly $|D| \ge \sqrt{k}$. Conversely, for any $k \in \mathbb{N}$, there is a difference cover of $[0..k)$ with $O(\sqrt{k})$ elements and it can be constructed in $O(k)$ time (see \cite{BurkhardtKarkkainen}).
\begin{example}
The set $D = \{1,2,4\}$ is a difference cover of~$[0..5)$.

\begin{minipage}{0.15\textwidth}
$$
\begin{array}{c|c}
x & y,z\\
\hline
0 & 1,1\\
1 & 2,1\\
2 & 1,4\\
3 & 4,1\\
4 & 1,2
\end{array}
$$
\end{minipage}%
\hfill%
\begin{minipage}{0.6\textwidth}
\includegraphics[scale=0.20]{diffcover}\\
\small (the figure is from~\cite{BilleGortzSachVildhoj}.)
%[Beller T., Gog S., Ohlebusch E., Schnattinger T. ``Computing the longest common prefix array based on the Burrows--Wheeler transform''].)
\end{minipage}
\end{example}
%Our algorithm utilizes the following interesting property of difference covers.
\begin{lemma}[see \cite{BurkhardtKarkkainen}]
Let $D$ be a difference cover of $[0..k)$. For any integers $i,j$, there exists $d \in [0..k)$ such that $(i - d) \bmod k \in D$ and $(j - d) \bmod k \in D$.\label{DiffCoverProperty}
\end{lemma}


\section{Longest Common Extensions}

At the beginning, our algorithm fixes an integer $\tau$ (the precise value of $\tau$ is given below). Let $D$ be a difference cover of $[0..\tau^2)$ of size $O(\tau)$. Denote $M = \{i \in [1..n] \colon (i \bmod \tau^2) \in D\}$. Obviously, we have $|M| = O(\frac{n}{\tau})$. Our algorithm builds in $O(\frac{n}{\tau}(\tau^2 + \log n)) = O(\frac{n}{\tau}\log n + n\tau)$ time a data structure that can calculate $\LCE(i, j)$ in constant time for any $i,j \in M$. To compute $\LCE(i, j)$ for arbitrary $i, j \in [1..n]$, we simply compare $w[i..n]$ and $w[j..n]$ from left to right until we reach the positions $i+d$ and $j+d$ such that $i+d \in M$ and $j+d \in M$, and then we obtain $\LCE(i, j) = d + \LCE(i + d, j + d)$ in constant time. By Lemma~\ref{DiffCoverProperty}, we have $d < \tau^2$ and therefore, the value $\LCE(i, j)$ can be computed in $O(\tau^2)$ time. Thus, our algorithm can execute any sequence of $O(n)$ $\LCE$ queries in $O(\frac{n}{\tau}\log n + n\tau^2)$ time. Putting $\tau = \lceil\log^{\frac{1}3} n\rceil$, we obtain $O(\frac{n}{\tau}\log n + n\tau^2) = O(n\log^{\frac{2}{3}} n)$. Now it suffices to describe the data structure answering the $\LCE$ queries on the positions from $M$.

The data structure that we build in the preprocessing step is the compacted trie $T$ with the minimal number of vertices such that for any $i \in M$, the string $w[i..n]$ can be spelled out on the path from the root to some leaf of $T$ (see Figure~\ref{fig:treeT}). We store the labels on the edges of $T$ as pointers to substrings of $w$. The trie $T$ is commonly referred to as a \emph{sparse suffix tree}. Obviously, $T$ occupies $O(\frac{n}{\tau})$ space. For simplicity, we assume that $w[n]$ is a special letter that does not occur in $w[1..n{-}1]$, so, for each $i \in M$, the suffix $w[i..n]$ corresponds to some leaf of $T$.

Let $i, j \in M$. It is straightforward that $\LCE(i, j)$ is equal to the length of the string written on the path from the root of $T$ to the nearest common ancestor of the leaves corresponding to the suffixes $w[i..n]$ and $w[j..n]$. Using the construction of~\cite{HarelTarjan}, one can preprocess $T$ in $O(\frac{n}{\tau})$ time such that the nearest common ancestor of any two leaves can be found in constant time. So, to finish the proof, it remains to describe how to build $T$ in $O(\frac{n}{\tau}(\tau^2 + \log n))$ time.

In general our construction is similar to that of~\cite{Kosolobov3}. We use the fact that the set $M$ has the ``period'' $\tau^2$, i.e., for any $i\in M$, we have $i + \tau^2 \in M$ provided $i + \tau^2 \le n$. Our algorithm consecutively inserts the suffixes $\{w[i..n] \colon i \in M\}$ in $T$ from right to left. Suppose for some $k \in M$, we already have a compacted trie $T$ that contains the suffixes $w[i..n]$ for all $i \in M \cap (k..n]$. We are to insert the suffix $w[k..n]$ in $T$. To perform the insertion efficiently, we maintain four additional data structures.

\paragraph{1. An order on the leaves of $T$} We store all leaves of $T$ in a linked list in the lexicographical order of the corresponding suffixes. We maintain on this list the order maintenance data structure of~\cite{BenderColeDemaineFarachColtonZito} that allows one to determine whether a given leaf precedes another leaf in the list in constant time. The insertion in this list takes constant amortized time. Hereafter, we say that a leaf $x$ of $T$ precedes [respectively, succeeds] another leaf $y$ if $x$ precedes [respectively, succeeds] $y$ in the list of leaves.

\paragraph{2. Slow $\LCE$ queries} Denote by $i_1, i_2, \ldots, i_m$ the sequence of all positions $M\cap (k..n]$ in the increasing lexicographical order of the corresponding suffixes $w[i_1..n], w[i_2..n], \ldots, w[i_m..n]$. For each $i_p \in M \cap (k..n]$, we associate with the leaf corresponding to the suffix $w[i_p..n]$ the value $\LCE(i_p, i_{p+1})$. It is easy to see that for any $i_p, i_q \in M\cap (k..n]$ such that $p < q$, we have $\LCE(i_p, i_q) = \min\{\LCE(i_p, i_{p+1}),$ $\LCE(i_{p+1}, i_{p+2}), \ldots,$ $\LCE(i_{q-1}, i_q)\}$. According to this observation, we store all leaves of $T$ in an augmented balanced search tree $C$ that allows one to calculate $\LCE(i_p, i_q)$ for any such $i_p$ and $i_q$ in $O(\log n)$ time. It is well known that the insertion in $C$ of a new leaf with the associated $\LCE$ value requires $O(\log n)$ amortized time.

\paragraph{3. The ``top'' part of $T$} We maintain a compacted trie $S$ that contains the strings $w[i..i{+}\tau^2]$ for all $i \in M \cap (k..n]$ (we assume $w[j] = w[n]$ for all $j > n$ and thus $w[i..i{+}\tau^2]$ is always well defined). Informally, $S$ is the ``top'' part of $T$, so, we augment each vertex of $S$ with a link to the corresponding vertex of $T$. We maintain on $S$ the data structure of~\cite{FranceschiniGrossi} supporting the insertions in $O(\tau^2 + \log n)$ amortized time. Let $x$ be a leaf of $S$ corresponding to a string $w[i..i{+}\tau^2]$. We augment $x$ with a balanced search tree $B_x$ that contains the leaves of $T$ corresponding to all suffixes $w[j..n]$ such that $w[j{-}\tau^2..j] = w[i..i{+}\tau^2]$ in the order induced by the list of all leaves of $T$ (see Figure~\ref{fig:treeS}). One can easily show that $S$ together with the associated search trees occupies $O(\frac{n}{\tau})$ space in total.

\paragraph{4. Dynamic weighted ancestors} We maintain on $T$ the \emph{dynamic weighted ancestor} data structure of~\cite{KopelowitzLewenstein} that, for any given vertex $x$ and an integer $c$, can find in $O(\log n)$ time the nearest ancestor of $x$ such that the length of the string written on the path from the root to this ancestor is less than $c$. When we insert a new vertex in $T$, the modification of this structure takes $O(\log n)$ amortized time.

\begin{example}
Let $\tau^2 = 4$. The set $D = \{0,1,3\}$ is a difference cover of $[0..\tau^2)$. Consider the string $w = \underline{a}b\underline{c}\underline{a}\underline{b}c\underline{a}\underline{b}\underline{a}b\underline{c}
\underline{a}\underline{b}b\underline{\$}$; the underlined positions are from $M = \{i \in [1..n] \colon (i\bmod \tau^2) \in D\}$. The sparse suffix tree of $w$ is presented in Figure~\ref{fig:treeT}. Figure~\ref{fig:treeS} depicts the corresponding compacted trie $S$; each leaf of $S$ is augmented with a balanced search tree of certain leaves of $T$ (see the description above).
\begin{figure}[htb]
\includegraphics[scale=0.35]{treeT}
\caption{The sparse suffix tree $T$ for $w = \underline{a}b\underline{c}\underline{a}\underline{b}c\underline{a}\underline{b}\underline{a}b\underline{c}\underline{a}\underline{b}b
\underline{\$}$ (the underlined positions are from $M$).}
\label{fig:treeT}
\end{figure}
\begin{figure}[htb]
\includegraphics[scale=0.35]{treeS}
\caption{The balanced search trees $B_1, B_2, \ldots, B_9$ are augmented with the indices of leaves of $T$.}
\label{fig:treeS}
\end{figure}
\end{example}

\paragraph{The construction of $T$} Now to insert $w[k..n]$ in $T$, we first insert $w[k..k{+}\tau^2]$ in $S$ in $O(\tau^2 + \log n)$ time. If $S$ does not contain $w[k..k{+}\tau^2]$, then we attach a new leaf in $T$ using the links from $S$ to $T$ and modify in an obvious way all related data structures: the list of leaves of $T$, the newly created balanced search tree associated with the new leaf of $S$, the balanced search tree $C$, and the dynamic weighted ancestor data structure on $T$. The modifications require $O(\log n)$ amortized time.

Now suppose $S$ contains $w[k..k{+}\tau^2]$. Denote by $v$ the leaf of $S$ corresponding to $w[k..k{+}\tau^2]$. Let $y$ be the leaf of $T$ corresponding to the suffix $w[k{+}\tau^2..n]$ (recall that $k{+}\tau^2 \in M$). In $O(\log n)$ time we obtain the immediate predecessor and successor of $y$ in the search tree $B_v$, denoted by $x$ and $z$, respectively. Notice that $x$ is the immediate predecessor only in the set of all leaves contained in $B_v$ but it may not be the immediate predecessor in the whole list of all leaves of $T$; the situation with $z$ is similar. Let $x$ and $z$ correspond to suffixes $w[i_x..n]$ and $w[i_z..n]$, respectively. Since $w[i_x{-}\tau^2..i_x] = w[i_z{-}\tau^2..i_z] = w[k..k{+}\tau^2]$, it is straightforward that the suffixes $w[i_x{-}\tau^2..n]$ and $w[i_z{-}\tau^2..n]$ are, respectively, the immediate predecessor and successor of the suffix $w[k..n]$ in the set of all suffixes inserted in $T$. Hence, we must insert $w[k..n]$ between these suffixes.

It is easy to see that $\LCE(k, i_x{-}\tau^2) = \tau^2 + \LCE(k{+}\tau^2, i_x)$ and $\LCE(k, i_z{-}\tau^2) = \tau^2 + \LCE(k{+}\tau^2, i_z)$. The values $\LCE(k{+}\tau^2, i_x)$ and $\LCE(k{+}\tau^2, i_z)$ can be computed in $O(\log n)$ time using the balanced search tree $C$. Without loss of generality consider the case $\LCE(k, i_x{-}\tau^2) \ge \LCE(k, i_z{-}\tau^2)$. We find the position where we insert a new leaf in $T$ using the weighted ancestor query on the value $\LCE(k, i_x{-}\tau^2)$ and the leaf of $T$ corresponding to the suffix $w[i_x{-}\tau^2..n]$. We finally modify all related data structures in an obvious way: the list of leaves of $T$, the balanced search trees $B_v$ and $C$, and the dynamic weighted ancestor data structure on $T$. These modifications require $O(\log n)$ amortized time.

\paragraph{Time and space} The insertion of a new suffix in $T$ takes $O(\tau^2 + \log n)$ amortized time. Thus, the construction of $T$ consumes overall $O(\frac{n}{\tau}(\tau^2 + \log n))$ time as required. The whole data structure occupies $O(\frac{n}{\tau})$ space.


\section{Conclusion}

It seems that further improvements in the considered problem may be achieved by more and more efficient longest common extension data structures on a general ordered alphabet. One might even conjecture that there is a data structure that can execute any sequence of $k$ $\LCE$ queries on a string of length $n$ over a general ordered alphabet in $O(k + n)$ time. However, we do not yet have theoretical evidence for such strong results.

Another interesting direction is a generalization of our result for the case of online algorithms (e.g., see~\cite{HongChen} and~\cite{Kosolobov2}).


\section*{References}

\bibliography{faster_runs}

\end{document}

