%
%Consider the same algorithm we use with the following simple modification: If $\ell' \geq \ell \sqrt{\ell/ k}$, remove $e \times \ell$ vectors $u$ from the sketch (and from $\Lambda$), specifically those such that $\sum \ip{x_t,u}^2$ is minimal. We can approximate this quantity up to an error of $\|X^{(t)}\|_F^2/(\ell \sqrt{\ell/ k})$ via another sketch of $X$ requiring $O(d\ell \sqrt{\ell/ k})$ space. Here $X^{(t)}$ is the matrix consisting of the first $t$ columns of $X$ and $t$ is the time point were we reached $\ell' \geq \ell \sqrt{\ell/ k}$.
%The (additive) error induced by this ditch is at most 
%$$\frac{\|X^{(t)}\|_F^2 \cdot 2 e \ell}{\ell \sqrt{\ell/ k}} = 2e\sqrt{\frac{k}{\ell}}\|X^{(t)}\|_F^2$$
%Now, by the time $t'$ we will reach $\ell' \geq \ell \sqrt{\ell/k}$ again we must have added $e \times \ell$ new vectors. This means that 
%$$ \|X^{(t')}\|_F^2 \geq e \times \|X^{(t)}\|_F^2 $$
%Hence, the loss suffered from the last ditch in terms of $\|X^{(t')}\|_F^2$ is at most 
%$$ 2e\sqrt{\frac{k}{\ell}}\|X^{(t')}\|_F^2 / e$$
%In general we get that the total loss from ditching $e\ell$ vectors once we reach $\ell' \geq \ell \sqrt{\ell/ k}$ is
%$$ 2e\sqrt{\frac{k}{\ell}}\|X\|_F^2 \cdot (1+e^{-1}+e^{-2}+\ldots) = O(\sqrt{\frac{k}{\ell}}\|X\|_F^2)$$
%It follows that w.l.o.g., our algorithm has an output of length at most $\ell$.



In this section we modify the algorithm of Section~\ref{sec:inefficient} in order to (1) improve the time and space complexity and (2) remove the assumption of knowledge of $\|X\|_F^2$. These two improvements are made at the expense of sacrificing the accuracy \emph{slightly}. In order to reduce the space and time complexity, the main idea in the modified algorithm is to keep a ``sketch'' of $C$ instead of $C$. So in Algorithm 2, the matrix $C$ is a sketch yet it plays the role of its corresponding matrix in Algorithm 1. The idea that helps us to avoid assuming knowledge of $\|X\|_F^2$ is to make decisions about increasing the dimension of $U$ based upon ``current knowledge'' about the Frobenius norm of the input matrix, i.e., the Frobenius norm squared of the 
sub-matrix of $X$ observed up to the current iteration. Without an additional trick this leads to a sketch size dependent on $\log(n)$, where $n$ is the number of columns. In order to remove this dependency on $n$, we work with a fixed, albeit slightly larger budget than before, and when we need to add a column to a full matrix $U$, we simply remove a column that is ``the least helpful'', in the sense that it covers a small amount of weight from the vectors observed so far. 
%Also, $||X||_{\mathrm{F}}$ is not part of the input; the decisions to increase the dimension of $U$ are made based upon the ``current knowledge'' about the Frobenius norm of the input matrix, i.e., the Frobenius norm squared of the 
%sub-matrix of $X$ observed up to the current iteration. 
Lastly, to avoid technical issues, we assume some prior knowledge of the quantity $\|X\|_F^2$; we merely assume that we have some quantity $w_0$ such that $w_0 \leq \|X\|_F^2$ and $w_0 \gg \|x_t\|^2$ for all $t$. We emphasize that this assumption is meant only to simplify the analysis of the algorithm, and it is not a difficult task to adapt the algorithm to work without any knowledge regarding $\|X\|_F^2$.

Throughout we work with several matrices that are updated during the running time of the algorithm. For such a matrix $A$ we denote its value at round $t$ by $A_t$. In addition, we write $R_t,X_t$ to describe the matrices whose columns are $r_1,\ldots,r_t$ and $x_1,\ldots,x_t$ respectively.





\begin{algorithm}[h!]
\begin{algorithmic}
\STATE input: $X \in \R^{d \times n}$, $k < d$, $w_0$ a lower bound for $\|X\|_F^2$, parameter $0 < \eps \leq 0.1$. 
\STATE $U = 0^{d \times k/\eps^3}$, $W = 0^{d \times k/\eps^3}$, $Z = 0^{d \times k/\eps^2}$, $w=0$
\FOR {$t = 1,...,n$}
\STATE $w = w + \|x_t\|_2^2$
\STATE sketch $x_t$ into $W$
\STATE $r_t = x_t - U U^\top  x_t$
\STATE $C = (I-U U^\top)ZZ^\top(I-U U^\top)$
\WHILE {$\|C+r_t r_t^\top\| \geq \max\{w_0,w\} \cdot \frac{\eps^2}{k}$}
	\STATE $u = \operatorname{topEigenvector}(C)$
	\STATE If $U$ has a zero column, write $u$ in its place. Otherwise, write $u$ instead of the column $v$ of $U$ with the minimal quantity of $\|Wv\|$
	\STATE $C = (I-U U^\top)ZZ^\top(I-U U^\top)$
	\STATE $r_t = x_t - U U^\top  x_t$
\ENDWHILE
\STATE sketch $r_t$ into $Z$
\STATE $y_t \gets U^\top x_t$
\ENDFOR
\end{algorithmic}
\caption{An efficient online PCA algorithm}\label{alg3}
\end{algorithm}



\begin{Obs} \label{obs:U orth str}
The vectors of $U$ are orthonormal.
\end{Obs}
\begin{proof}
The proof is analogous to that of Observation~\ref{obs:U orth}.
\end{proof}


\begin{lemma} \label{lem:us singular}
Let $Z_t$ be the matrix $Z$ at time $t$. Let $u_1,\ldots,u_{\ell}$ be the columns of $U_t$ ($U$ at that time). Then  $u_1,\ldots,u_{\ell}$ are left singular vectors of $Z_t$ (possibly with singular value zero).
\end{lemma}
\begin{proof}
We prove the claim by induction on $t$, with a trivial base case of $t=0$. Let $t>0$. Let $u$ be a vector that is a column in $U_{t-1}$. Then $r_t$ is orthogonal to $u$, meaning that according to Lemma~\ref{lem:sketch prop}, $u$ remains a singular vector of $Z_t Z_t^\top$. Now, assume that at time $t$, a vector $u$ is added to $U$. We write $U_t$ as the matrix that does not yet contain $u$. By the induction hypothesis
$$Z_t Z_t^\top =  Z_tZ_t^\top (I-U_t U_t^\top) +  U_t \Lambda U_t^\top = C_t + U_t \Lambda U_t^\top$$
where $\Lambda$ is some diagonal matrix. It follows that $u$, the top eigenvector of $C_t$, is a singular vector of $Z_t$, proving the claim.
\end{proof}


\begin{lemma}\label{lem2strong}
$ \|R\|_2^2 \leq \frac{2\eps^2}{k} \|X\|_{\mathrm{F}}^2.$
\end{lemma}
\begin{proof}
We abuse notations and denote by $Z$ the matrix obtained at the end of the execution of the algorithm. Let $u_1,\ldots,u_{\ell'}$ and $\lambda_1,\ldots,\lambda_{\ell'}$ be the columns of $U$ at the termination of the algorithm and their corresponding eigenvalues in $C$ at the time of their addition to $U$.
From Lemmas~\ref{lem:us singular} and~\ref{lem:sketch prop} we have that
$$ Z Z^\top = V \Lambda' V^\top + U \Lambda U^\top$$
with $\Lambda$ being a diagonal $\ell' \times \ell'$ matrix with $\Lambda_{jj} \leq \lambda_j \leq \frac{\eps^2}{k} \|X\|_F^2 $, $\Lambda'$ being some diagonal matrix, and $V$ having orthonormal columns, also orthonormal to those of $U$. According to its definition we have that $C= V\Lambda' V^\top$ and $\|C\|_2 \leq \frac{\eps^2}{k} \|X\|_F^2$.
We conclude that 
$$ \|ZZ^\top \|_2 = \max\{\|C\|_2, \|U \Lambda U^\top\|_2 \} \leq \frac{\eps^2}{k} \|X\|_F^2 $$
Next, by the sketching property, for appropriate matrix $E$: $Z Z^\top = R R^\top + E$, with $\|E\| \leq \frac{\eps^2}{k} \|R\|_F^2 $. 
It follows that
$$ \|R\|_2^2 = \|RR^\top \| = \|ZZ^\top - E\| \leq \|ZZ^\top \| + \|E\| \leq \frac{2\eps^2}{k}\|X\|_F^2$$
\end{proof}



\begin{Lem} \label{thm1strong}
$$ 
\|R\|_F^2 \le OPT_k + \sqrt{\frac{8 k}{\ell}}\cdot \|X\|_{\mathrm{F}}^2
%4 \OPT_k + \sqrt{ \frac{ 36 k}{\ell} } || X ||_{\mathrm{F}}^2
$$
\end{Lem}
\begin{proof}
The Lemma can be proven analogously to Theorem~\ref{thm1}, as the only difference is the bound on $\|R\|_2^2$. 
\end{proof}




\begin{Lem}\label{lem3stronger}
Let $w' = \max\{w_0,w\}$ and assume that for all $t$, $\|x_t\|_2^2 \leq w' \cdot \frac{\eps^2}{5k}$. For any integer $\tau$ consider the iterations of the algorithm during which $w' \in [2^\tau, 2^{\tau+1})$. During this time, the while loop will be executed at most $5 k/\eps^2$ times.
\end{Lem}
\begin{proof}
The condition of the while loop is $\|C+r_t r_t^\top\| \geq w' \frac{\eps^2}{k}$. Notice that if that is the case, then
$$ \|C\| \geq \|C+ r_t r_t^\top\|-\|r_t r_t^\top\| \geq 0.8 w' \frac{\eps^2}{k} \geq 0.4 \cdot 2^{\tau+1} \frac{\eps^2}{k}  $$
Consider the case where we executed the while loop and entered a vector $u$ into a zero column of $U$. In this case, we added an amount of at least $0.4 \cdot 2^{\tau+1} \frac{\eps^2}{k}$ to the difference 
\begin{equation} \label{eq:diff ZC}
 \mathrm{Trace}(ZZ^\top) - \mathrm{Trace}(C)  \leq  \mathrm{Trace}(ZZ^\top) .
\end{equation}
Now, consider the case in which we removed a vector $v$ from $U$. This action subtracts from the above difference. We will upper bound the subtracted quantity and show that overall, the above difference has increased by at least $0.2 \cdot 2^{\tau+1} \frac{\eps^2}{k}$. The lemma will immediately follow as the above difference is bounded by 
$$\mathrm{Trace}(ZZ^\top) \leq \mathrm{Trace}(RR^\top) \leq \mathrm{Trace}(XX^\top) \leq 2^{\tau+1} .$$

Recall that $v$ was chosen as an eigenvector of $C$ at the time of its addition to $U$; denote its eigenvalue by $\lambda$.
By Lemma~\ref{lem:us singular}, $v$ is an eigenvector of $Z$ as well at that time, with eigenvalue $\lambda$. By Lemma~\ref{lem:sketch prop} we get that when removing the vector $v$ from $U$, it is still an eigenvector of $Z$ with eigenvalue $\leq \lambda$. Hence, by removing $v$ we are in fact subtracting a quantity of at most $\lambda$ from the difference in equation~\eqref{eq:diff ZC}. We proceed to upper bound this quantity $\lambda$.

To this end, notice that for any two time points $t' \leq t$, 
$$C_{t'} \preceq Z_{t'} Z_{t'}^\top \preceq R_{t'} R_{t'}^\top \preceq X_{t'} X_{t'}^\top \preceq X_t X_t^\top$$
As $\lambda = v^\top C_{t'} v$, for $t'$ the time during which $v$ was added to $U$, it follows that at the time $t$ during which we removed $v$ it holds that 
\begin{equation} \label{eq:lambda v x}
\lambda \leq \|X_t v\|_2^2.
\end{equation} 
Recall now that $v$ is chosen as the minimizer of $\|W_t v\|$. Since the columns of $U$ are orthonormal we have that\footnote{We abuse notations and refer to $U$ as a set of its non-zero columns} 
$$\sum_{v' \in U} \|W_t v'\|^2 \leq \|W_t \|_F^2 \leq \|X_t \|_F^2 = w_t$$ 
($w_t$ is the value of $w$ at time $t$) hence, we have that $\|W_t v\|^2 \leq  w_t \frac{\eps^3}{k} \leq 2^{\tau+1} \frac{\eps^2}{10 k}$. As $W$ is a sketch of $X$ with a parameter of $\frac{k}{\eps^3} \geq \frac{10k}{\eps^2}$ we get by Lemma~\ref{lem:sketch prop} that 
\begin{equation} \label{eq:X v tau}
\|X_t v\|^2 \leq \frac{2^{\tau+1} \eps^2}{10k} + \frac{\|X_t\|_F^2 \eps^2}{10k} \leq \frac{2^{\tau+1} \eps^2}{5k}
\end{equation}
Combining equations~\eqref{eq:lambda v x} and \eqref{eq:X v tau} we get that the amount subtracted from the difference in equation~\eqref{eq:diff ZC} is at most $0.2 \cdot \frac{2^{\tau+1} \eps^2}{k}$. By the analysis above however, by adding $u$ to $U$ we have added an amount of at least $0.4 \cdot 
\frac{2^{\tau+1} \eps^2}{k}$ to the difference of equation~\eqref{eq:diff ZC}, meaning that overall we added a quantity of at least $0.2 \cdot \frac{2^{\tau+1} \eps^2}{k}$ to the difference. This concludes the proof of the lemma.
\end{proof}



\begin{lemma}\label{lem1stronger}
Let $(v_1,t_1),\ldots,(v_j,t_j),\ldots$ be the sequence of pairs of vectors removed from $U$, and the times during which they were removed.
$$\ALG_{\ell} \le \left(\|R\|_F+ \sqrt{\sum_j \|X_{t_j} v_j\|^2}  \right)^2  .$$
\end{lemma}
\begin{proof}
Consider the output matrix $Y$. We write each of its columns $y_t$ as $y_t=y_t^{(1)}+y_t^{(2)}$ where the two summands are entry disjoint. $y_t^{(1)}$ is equal to $y_t$ in the entries corresponding to unit vectors $u$ that are present in $U$ at the end of the execution of the algorithm. In the other entries we set it to zero. $y_t^{(2)}$ is the complement, with non-zero entries equal to $y_t$'s in entries corresponding to vectors $v$ that were at some point removed from $U$.  

Let $U_t$ denote the projection used for the vector $x_t$ and let $Y^{(1)}$ be the matrix whose columns are the $y^{(1)}$'s. 
We denote by $U_t^{(1)}$ the matrix obtained by replacing the columns of $U_t$ that were eventually removed by zero columns. Notice that $y_t^{(1)} = (U_t^{(1)})^\top x_t$. It follows that
\begin{eqnarray*}
& & \|X- U_n Y^{(1)}\|_{\mathrm{F}}^2 \\
&=& \sum_{t=1}^{n} \|x_t - U_n (U_t^{(1)})^\top x_t\|_2^2 = \sum_{t=1}^{n} \|x_t - U_t^{(1)}(U_t^{(1)})^\top x_t\|_2^2 \leq \|R\|_{\mathrm{F}}^2
\end{eqnarray*}
Now, notice that
$$ \|U_n Y^{(2)}\|_F^2 \leq  \| Y^{(2)} \|_F^2 \leq \sum_j \|X_{t_j} v_j \|^2 $$
Finally, according to the triangle inequality, 
$$ \ALG_{\ell} \leq \|X-U_n Y\|_F^2 \leq \left( \|X - U_n Y^{(1)}\|_F + \|Y^{(2)}\|_F \right)^2 $$
which proves the claim.
\end{proof} 


\begin{Lem} \label{lem:ditch loss}
Let $(v_1,t_1),\ldots,(v_j,t_j),\ldots$ be the sequence of pairs of vectors removed from $U$, and the times during which they were removed.
$$\sum_j \|X_{t_j} v_j\|^2 \leq 40\eps \|X\|_F^2  .$$
\end{Lem}
\begin{proof}
For some integer $\tau$ consider the execution of the algorithm during the period in which $w' \in [2^{\tau},2^{\tau+1})$. According to Lemma~\ref{lem3stronger}, at most $5k/\eps^2$ vectors $v$ were removed from $U$ during that period. Furthermore, since the columns of $U$ are orthonormal we have that $\sum_{v' \in U} \|Wv'\|^2 \leq \|W\|_F^2$, hence each removed vector $v_j$, which was chosen as the minimizer of $\|W_{t_j}v\|$, is such that 
$$\|W_{t_j} v_j\|^2 \leq \frac{\eps^3}{k} \|W_{t_j}\|_F^2 \leq \frac{\eps^3}{k} \|X_{t_j}\|_F^2$$
The last inequality is due to the properties of the sketch (Lemma~\ref{lem:sketch prop}). The same lemma implies that
$$ \|X_{t_j} v_j\|^2 \leq \frac{\eps^3}{k} \|X_{t_j}\|_F^2 + \|W_{t_j} v_j\|^2 \leq \frac{2\eps^3}{k} \|X_{t_j}\|_F^2 \leq \frac{2\eps^3}{k} 2^{\tau+1} $$
It follows that the contribution of vectors $v$ thrown from the set during the discussed time period is at most 
$$ 5\frac{k}{\eps^2} \cdot \frac{2\eps^3 }{k} 2^{\tau+1} = 10\eps \cdot 2^{\tau+1} $$
The entire sum can now be bounded by a geometric series, ending at $\tau = \left \lfloor \log_2(\|X\|_F^2)\right \rfloor$, thus proving the lemma.
\end{proof}


\begin{lemma}[Time complexity]\label{lem5strong} 
Algorithm 2 requires $O(n \log(\mu n)d \ell )$ arithmetic operations.
\end{lemma}
\begin{proof}
It is an easy task to see that the time spent outside the while loop, not including the condition verification of the while loop, is $O(d\ell)$ (Lemma~\ref{lem:sketch prop}). The verification of the while loop requires $O(d\ell^2)$ arithmetic operations. However, if we assume that each vector $x_i$ has norm $\|x_i\|^2 \leq \max\{w_0,w\}/\ell^2$, and make sure that inside the while loop, we add vectors to the sketch until $\|C\| \leq \max\{ w_0,w \}/(2\ell)$ then we can check the condition in a lazy fashion, and this will be done in no more than $1/\ell$ of the iterations. This yields a running time of $O(nd\ell)$ not including the inner while loop.

By Lemma~\ref{lem3stronger} the while loop will be activated at most \zk{get the exact number right}. Each such time we require $O(d\ell^2)$ arithmetic operations leading to a total running time of \zk{???}
\end{proof}

\begin{lemma}[Space complexity]\label{lem6strong}
 Algorithm 2 requires $O(d \ell \log(\mu n))$ space. \zk{fix bound}
\end{lemma}
\begin{proof}
It is clear that the algorithm maintains at most $O(\ell')$ column vectors of size $d$. The bound thus follows from the bound on $\ell'$ in Lemma~\ref{lem3stronger}.
\end{proof}

