\section{Implementation of Extensions Presented in Section~\ref{sec:extensions}}

In this section we modify Algorithm~\ref{alg1} in order to (1) remove the assumption of knowledge of $\|X\|_{\fro}^2$ and (2) improve the time and space complexity. These two improvements are made at the expense of sacrificing the accuracy \emph{slightly}. In order to reduce the space and time complexity, the main idea in the modified algorithm is to keep a ``sketch'' of $C$ instead of $C$. So in our modified algorithm, Algorithm 2, the matrix $C$ is a sketch yet it plays the role its corresponding matrix plays in Algorithm 1. The idea that helps us to avoid assuming knowledge of $\|X\|_{\fro}^2$ is to make decisions of increasing the dimension of $U$ based upon ``current knowledge'' about the Frobenius norm of the input matrix, i.e., the Frobenius norm squared of the 
sub-matrix of $X$ observed up to the current iteration. Without an additional trick this leads to a sketch size dependent on $\log(n)$, where $n$ is the number of columns. In order to remove this dependency on $n$, we work with a fixed, albeit slightly larger, budget than before, and when we need to add a column to a full matrix $U$, we simply remove a column that is ``the least helpful'', in the sense that it covers a small amount of weight from the vectors observed so far. 
%Also, $||X||_{\fro}$ is not part of the input; the decisions to increase the dimension of $U$ are made based upon the ``current knowledge'' about the Frobenius norm of the input matrix, i.e., the Frobenius norm squared of the 
%sub-matrix of $X$ observed up to the current iteration. 
Lastly, to avoid technical issues, we assume some prior knowledge of the quantity $\|X\|_{\fro}^2$; we merely assume that we have some quantity $w_0$ such that $w_0 \leq \|X\|_{\fro}^2$ and $w_0 \gg \|x_t\|^2$ for all $t$. We state that this assumption is meant only to simplify the analysis of the algorithm, and it is not a difficult task to adapt the algorithm to work without any knowledge regarding $\|X\|_{\fro}^2$.



\begin{algorithm}[h!]\label{alg2}
\begin{algorithmic}
\STATE input: $X \in \R^{d \times n}$, $k < d$, $w_0$ a lower bound for $\|X\|_F^2$, parameter $0 < \eps \leq \frac{1}{15}$. 
%\STATE $U = 0^{d \times k/\eps^3}$, $W = 0^{d \times k/\eps^3}$, $Z = 0^{d \times k/\eps^2}$, $w=0$
\STATE $U = 0^{d \times k/\eps^3}$, $Z = 0^{d \times k/\eps^2}$, $w=0$, $w_U = 0^{k/\eps^3}$
\FOR {$t = 1,...,n$}
\STATE $w = w + \|x_t\|_2^2$
%\STATE sketch $x_t$ into $W$
\STATE $r_t = x_t - U U^\top  x_t$
\STATE $C = (I-U U^\top)ZZ^\top(I-U U^\top)$
\WHILE {$\|C+r_t r_t^\top\| \geq \max\{w_0,w\} \cdot \frac{\eps^2}{k}$}
	\STATE $u, \lambda = \operatorname{topEigenvectorAndValue}(C)$
	\STATE $(w_U)_u \gets \lambda$
%	\STATE If $U$ has a zero column, write $u$ in its place. Otherwise, write $u$ instead of the column $v$ of $U$ with the minimal quantity of $\|Wv\|^2+\|Zv\|^2$
	\STATE If $U$ has a zero column, write $u$ in its place. Otherwise, write $u$ instead of the column $v$ of $U$ with the minimal quantity of $(w_U)_v$
	\STATE $C = (I-U U^\top)ZZ^\top(I-U U^\top)$
	\STATE $r_t = x_t - U U^\top  x_t$
\ENDWHILE
\STATE sketch $r_t$ into $Z$
\STATE $y_t \gets U^\top x_t$
\STATE For each non-zero column $u$ in $U$, $(w_U)_u \gets (w_U)_u + \ip{y_t,u}^2$
\ENDFOR
\end{algorithmic}
\caption{An efficient online PCA algorithm}
\dg{need to make a choice: either working with $\epsilon$ directly or with $\ell$.}

\dg{I don't understand why in one place we write $C\gets ZZ^{\top}(I-UU^{\top})$ and in another place $C\gets (I-UU^{\top})ZZ^{\top}(I-UU^{\top})$. This is really confusing}
\end{algorithm}

\subsection{Notations}
For a variable $\xi$ in the algorithm, where $\xi$ can either be a matrix $Z,X,C$ or a vector $r,x$ we denote by $\xi_t$ the value of $\xi$ at the end of iteration number $t$. $\xi_0$ will denote the value of the variable at the beginning of the algorithm. For variables that may change during the while loop we denote by $\xi_{t,z}$ the value of $\xi$ at the end of the $z$'th while loop in the $t$'th iteration. In particular, if the while loop was never entered the value of $\xi$ at the end of the iteration $t$ will be equal to $\xi_{t,0}$. For such $\xi$ that may change during the while loop notice that $\xi_t$ is its value at the end of the iteration, meaning after the last while loop has ended.

An exception to the above is for the variable $w$. Here, we denote by $w_t$ the value of $\max\{w_0,w\}$ at the end of iteration number $t$ \dg{so this is not really an exception - its the same convention}. We denote by $n$ the index of the last iteration of the algorithm. In particular $\xi_n$ denotes the value of $\xi$ upon the termination of the algorithm.


\subsection{Matrix Sketching}
We use the Frequent Directions matrix sketching algorithm presented by Liberty in \cite{Liberty13}. This algorithm sketches a matrix $R$ in the streaming setting, where $R$ is observed column by column.


\begin{Lem} \label{lem:sketch prop}
Let $R_1,\ldots,R_t,\ldots$ be a sequence of matrices with columns of dimension $d$ where $R_{t+1}$ is obtained by appending a column vector $r_{t+1}$ to $R_t$. Let $Z_1,\ldots,Z_t,\ldots$ be the corresponding sketches of $R$ obtained by adding these columns as they arrive, according to the Frequent Directions algorithm in \cite{Liberty13} with parameter $\ell$.
\begin{enumerate}
\item The amortized number of arithmetic operations required by the sketching algorithm is $O(\ell d)$ per column.

\item Each $Z_t$ is a matrix of dimensions $d \times 2\ell$.

\item Let $u$ be a left singular vector of $Z_t$ with singular value $\sigma$ and assume that $r_{t+1}$ is orthogonal to $u$. Then $u$ is a left singular vector of $Z_{t+1}$ with singular value $\leq \sigma$. \label{itm:r u orth Z}

\item For any vector $u$ and time $t$ it holds that $\|Z_t u\| \leq \|R_t u\|$. \label{itm:Z leq R}

\item For any $t$ there exists a matrix $E_t$ with $\|E_t\| \leq \|R_t\|_F^2 / \ell$ such that $Z_t Z_t^\top + E_t = R_t R_t^\top$. \label{itm: Z E plus R}
\end{enumerate}
\end{Lem}



\subsection{Proofs}
\begin{Obs} \label{obs:U orth str}
At all times, the matrix $U^\top U$ is a diagonal matrix with either zeros or ones across the diagonal. In other words, the non-zero column vectors of $U$, at any time point are orthonormal.
\end{Obs}
\begin{proof}
We prove the claim for any $U_{t,z}$ by induction on $(t,z)$. For $t=0$ the claim is trivial as $U_0=0$. For the step we need only to consider $U_{t,z}$ for $z>0$ since $U_{t,0} = U_{t-1}$. Let $u$ be the new vector in $U_{t,z}$. Since $u$ is defined as an eigenvector of 
$$C_{t,z-1} = (I-U_{t,z-1} U_{t,z-1}^\top) Z_{t-1} Z_{t-1}^\top (I-U_{t,z-1} U_{t,z-1}^\top) \ ,$$
we have that $U_{t,z-1}^\top u=0$ and the claim immediately follows.
\end{proof}


\begin{lemma} \label{lem:us singular}
For all $t,z$, the non-zero columns of $U_{t,z}$ are left singular vectors of $Z_{t-1}$ (possibly with singular value zero). In particular, the claim holds for the final values of the matrices $U$ and $Z$.
\end{lemma}

\begin{proof}
We prove the claim by induction on $t,z$, with a trivial base case of $t=1$ where $Z_{t-1}=0$. Let $t>1$. For $z=0$, each non-zero column vector $u$ of $U_{t,0}$ is a non-zero column vector of $U_{t-1,z}$ for the largest valid $z$ w.r.t $t-1$. By the induction hypothesis it holds that $u$ is a singular vector of $Z_{t-2}$. According to Observation~\ref{obs:U orth str} we have that $r_{t-1}$, the vector added to $Z_{t-2}$, is orthogonal to $u$, hence Lemma~\ref{lem:sketch prop}, item~\ref{itm:r u orth Z} indicates that $u$ is a singular vector of $Z_{t-1}$ as required.

Consider now $z>0$. In this case $u$ is a vector added in the while loop. Recall that $u$ is defined as an eigenvector of 
$$C = (I-U_{t,z-1} U_{t,z-1}^\top) Z_{t-1} Z_{t-1}^\top (I-U_{t,z-1} U_{t,z-1}^\top) $$
According to our induction hypothesis, all of the non-zero column vectors of $U_{t,z-1}$ are singular vectors of $Z_{t-1}$, hence
$$Z_{t-1} Z_{t-1}^\top = C + U_{t,z-1} U_{t,z-1}^\top Z_{t-1} Z_{t-1}^\top U_{t,z-1} U_{t,z-1}^\top  \ . $$
The two equalities above imply that any eigenvector of $C$ is an eigenvector of $Z_{t-1} Z_{t-1}^\top$ as well. It follows that $u$ is a singular vector of $Z_{t-1}$ as required, thus proving the claim.
\end{proof}


\begin{Lem} \label{lem:v has low weight}
Let $v$ be a column vector of $U_{t,z}$ that is not in $U_{t,z+1}$. Let $(t',z')$ be the earliest time stamp from which $v$ was a column vector in $U$ consecutively up to time $(t,z)$. It holds that 
$$ \|Z_{t-1}v\|^2 + \|(X_{t-1}-X_{t'-1}) v\|^2 \leq \frac{2\eps^3}{k} w_{t-1} $$
%$$ \|Z_{t-1}v\|^2 + \|X_{t-1} v\|^2 \leq \frac{3\eps^3}{k} w_{t-1} $$
\end{Lem}
\begin{proof}
We denote by $(w_U)_u$ the values of the $w_U$ vector for the different directions $u$ at time $(t,z)$.
Let $\lambda_v$ be the eigenvalue associated with $v$ during the time it was entered into $U$. Then at that time $\|Zv\|^2 = \lambda_v$. Furthermore, since $v$ was a column in $U$ up to time $t$ we get that all vectors added to $R$ from the insertion of $v$ up to time $t$ are orthogonal to $v$. Lemma~\ref{lem:sketch prop}, item~\ref{itm:r u orth Z} shows that 
$$\|Z_{t-1}v\|^2 \leq \lambda_v \leq (w_U)_v .$$
Since $v$ was a column vector of $U$ from iteration $t'$ up to iteration $t$, we have that 
$$ \|(X_{t-1}-X_{t'-1}) v\|^2 = \sum_{\tau=t'}^{t-1} \ip{v,y_\tau}^2 = (w_U)_v - \lambda_v \ .$$
Combining the two bounds above we obtain $\|Z_{t-1}v\|^2 + \|(X_{t-1}-X_{t'-1}) v\|^2 \leq (w_U)_v$.

It remains to bound the quantity $(w_U)_v$. We will bound the sum $\sum_{u \in U_{t,z}} (w_U)_u$ and use the fact that $v$ is the minimizer of the corresponding expression; since $U$ has $k/\eps^3$ columns, $(w_U)_v$ is upper bounded by $\sum_u (w_U)_u \cdot \frac{\eps^3}{k}$. Notice that since $Z$ is a sketch of $R$, Lemma~\ref{lem:sketch prop}, item~\ref{itm:Z leq R} indicates that for $t_u$, the iteration in which $u$ is inserted into $U$, 
$$ \|R_{t_u-1}u\|^2 \geq \|Z_{t_u-1}u\|^2 = \lambda_u $$
Hence,
$$ \sum_u (w_U)_u \leq \sum_u \|(X_{t-1}-X_{t_u-1}) u\|^2 + \|R_{t_u-1}u\|^2 \leq $$
$$\sum_u \|X_{t-1}u\|^2 + \|R_{t-1}u\|^2 \leq \|X_{t-1}\|_F^2 + \|R_{t-1}\|_F^2 \leq 2\|X_{t-1}\|_F^2 \leq 2 w_{t-1} \ , $$
so that
$$ (w_U)_v \leq \frac{\eps^3}{k} \sum_u (w_U)_u \leq \frac{2\eps^3}{k} w_{t-1} \ . $$
\end{proof}



\begin{Lem} \label{lem: bound C sketch}
At all times $\|C_{t,z}\| \leq w_{t-1} \cdot \frac{2\eps^2}{k}$.
\end{Lem} 
\begin{proof}
We prove the claim by induction over $t,z$. The base case for $t=0$ is trivial. For $t>0$, $z=0$, we have that $C_{t,0}=C_{t-1,z}$ for some $z$. Since $w_{t-1} \geq w_{t-2}$ the claim holds. Consider now $t,z>0$. We have that 
$$C_{t,z-1} = (I-U_{t,z-1} U_{t,z-1}^\top) Z_{t-1} Z_{t-1}^\top (I-U_{t,z-1} U_{t,z-1}^\top) $$
$$C_{t,z} = (I-U_{t,z} U_{t,z}^\top) Z_{t-1} Z_{t-1}^\top (I-U_{t,z} U_{t,z}^\top) $$
If $U_{t,z}$ is obtained by writing $u$ instead of a zero column of $U_{t,z-1}$ then $C_{t,z}$ is a projection of $C_{t,z-1}$ and the claim holds due to the induction hypothesis. If not, $u$ is inserted instead of some vector $v$. According to Lemmas~\ref{lem:us singular} and~\ref{lem:v has low weight}, $v$ is an eigenvector of $Z_{t-1} Z_{t-1}^\top$ with eigenvalue $\lambda_v \leq w_{t-1} \cdot \frac{2\eps^2}{k}$. It follows that $C_{t,z}$ is a projection of $C_{t,z-1}+\lambda_v v v^\top$. Now, since $C_{t,z-1}v=0$ (as $v$ is a column vector of $U_{t,z-1}$) we have that
$$\|C_{t,z}\| \leq \|C_{t,z-1}+\lambda_v v v^\top\| = \max\{ \|C_{t,z-1}\|, \|\lambda_v v v^\top\| \}$$
According to our induction hypothesis and the bound for $\lambda_v$, the above expression is bounded by $w_{t-1} \cdot \frac{2\eps^2}{k}$ as required.
\end{proof}


\begin{Lem} \label{lem:lambda upper sketch}
Let $u$ be a vector that is not in $U_{t,z}$ but is in $U_{t,z+1}$. Let $\lambda$ be the eigenvalue associated with it w.r.t $C_{t,z}$. It holds that $\lambda \leq w_{t-1} \cdot \frac{2\eps^2}{k}$. Furthermore, if $u$ is a column vector in $U_{t',z'}$ for all $t' \geq t, z' \geq z$, it holds that $\|u^\top Z_n\|^2 \leq \lambda \leq \|X_n\|_F^2 \cdot \frac{2\eps^2}{k} $.
\end{Lem}
\begin{proof}
Since $u$ is chosen as the top eigenvector of $C_{t,z}$ we have by Lemma~\ref{lem: bound C sketch} that
$$\lambda = \|C_{t,z} u\| = \|C_{t,z}\| \leq   w_{t-1} \cdot \frac{2\eps^2}{k} $$

For the second claim in the lemma we note that since $u$ is an eigenvector of $C_{t,z}= (I-U_{t,z} U_{t,z}^\top)  Z_{t-1}Z_{t-1}^\top  (I-U_{t,z} U_{t,z}^\top)$, we have that $U_{t,z}^\top u=0$, hence
$$ \|u^\top Z_{t-1}\|^2 = \|u^\top (I-U_{t,z} U_{t,z}^\top) Z_{t-1}\|^2 = u^\top C_{t,z} u \leq \|C_{t,z} u\| \leq w_{t-1} \cdot \frac{2\eps^2}{k}$$
Since $u$ is assumed to be an element of $U$ throughout the running time of the algorithm, it holds that for all future vectors $r$ added to the sketch $Z$, $u$ is orthogonal to $r$. The claim now follows from Lemma~\ref{lem:sketch prop} item~\ref{itm:r u orth Z}.
\end{proof}


\begin{lemma}\label{lem2strong}
$ \|R_n\|_2^2 \leq \frac{3\eps^2}{k} \|X_n\|_{\mathrm{F}}^2.$
\end{lemma}
\begin{proof}
Let $u_1,\ldots,u_{\ell}$ and $\lambda_1,\ldots,\lambda_{\ell}$ be the columns of $U_n$ and their corresponding eigenvalues in $C$ at the time of their addition to $U$.
From Lemmas~\ref{lem:us singular} and~\ref{lem:lambda upper sketch} we have that each $u_j$ is an eigenvector of $ZZ^\top$ with eigenvalue $\lambda_j' \leq \lambda_j \leq \frac{2\eps^2}{k}\|X_n\|_F^2$. It follows that
$$ \|Z_n Z_n^\top \| \leq \max \left\{ \frac{2\eps^2}{k}\|X_n\|_F^2, \|(I-U_n U_n^\top) Z_n Z_n^\top (I-U_n U_n^\top)\| \right\} $$
$$=  \max \left\{ \frac{2\eps^2}{k}\|X_n\|_F^2, \|C_n\| \right\} \leq \frac{2\eps^2}{k}\|X_n\|_F^2  \ .$$
The last inequality is due to Lemma~\ref{lem: bound C sketch}.

Next, by the sketching property (Lemma~\ref{lem:sketch prop} item~\ref{itm: Z E plus R}), for appropriate matrix $E$: $Z_n Z_n^\top = R_n R_n^\top + E$, with $\|E\| \leq \frac{\eps^2}{k} \|R_n\|_F^2 $. As the columns of $R$ are projections of those of $X$ we have that $\|R_n\|_F^2 \leq \|X_n\|_F^2$, hence
$$ \|R_n\|_2^2 = \|R_n R_n^\top \| = \|Z_n Z_n^\top - E\| \leq \|Z_n Z_n^\top \| + \|E\| \leq \frac{3\eps^2}{k}\|X_n\|_F^2$$
\end{proof}



\begin{Lem} \label{thm1strong}
$$ 
%\|R\|_F^2 \le OPT_k + \sqrt{\frac{8 k}{\ell}}\cdot \|X\|_{\mathrm{F}}^2
%4 \OPT_k + \sqrt{ \frac{ 36 k}{\ell} } || X ||_{\mathrm{F}}^2
\|R_n\|_F^2 \le \OPT_k + 6\eps \|X_n\|_{\mathrm{F}}^2
$$
\end{Lem}
\begin{proof}
The Lemma can be proven analogously to Theorem~\ref{thm1}, as the only difference is the bound over $\|R_n\|_2^2$. 
\end{proof}

\begin{Lem}\label{lem3stronger}
Assume that for all $t$, $\|x_t\|_2^2 \leq w_t \cdot \frac{\eps^2}{5k}$. Assume that $\eps \leq 0.1$. For $\tau>0$ consider the iterations of the algorithm during which $w_t \in [2^\tau, 2^{\tau+1})$. During this time, the while loop will be executed at most $5 k/\eps^2$ times. \zk{we can remove the requirement of $\eps < 0.1$ by having the appropriate sketches of size $\max\{10, 1/\eps\} \cdot k/\eps^2$.}
\end{Lem}
\begin{proof}
For the proof, we define a potential function 
$$\Phi_{t,z} =  \mathrm{Trace}(R_{t-1}R_{t-1}^\top) - \mathrm{Trace}(C_{t,z}) \ .$$
We first notice that since $C$ is clearly PSD,
$$\Phi_{t,z} \leq \mathrm{Trace}(R_{t-1} R_{t-1}^\top) = \|R_{t-1}\|_F^2 \leq \|X_{t-1}\|_F^2 \leq 2^{\tau+1} \ .$$
The second inequality holds since the columns of $R$ are projections of those of $X$, and the last since $\|X_{t-1}\|_F^2 \leq w_{t-1} < 2^{\tau+1}$. 
We will show that first, $\Phi$ is non-decreasing with time and furthermore, for valid $z>0$, $\Phi_{t,z} \geq \Phi_{t,z-1}+0.2\frac{\eps^2}{k} 2^{\tau+1}$. The result immediately follows.


Consider a pair $(t,z)$ followed by the pair of indices $(t+1,0)$. Here, $\Phi_{t+1,0}-\Phi_{t,z} = \|r_t\|^2 \geq 0$ hence for such pairs the potential is non-decreasing. Now consider some pair $(t,z)$ for $z>0$. Since $(t,z)$ is a valid pair it holds that
$$\|C_{t,z-1}\| = \|C_{t,z-1} + r_{t,z-1} r_{t,z-1}^\top - r_{t,z-1} r_{t,z-1}^\top\| \geq  
$$
\begin{equation} \label{eq:C t z lb}
\|C_{t,z-1} + r_{t,z-1} r_{t,z-1}^\top \|- \|r_{t,z-1} r_{t,z-1}^\top\| \geq w_t \frac{\eps^2}{k} (1-0.2) \geq 0.4 \cdot 2^{\tau+1} \frac{\eps^2}{k}
\end{equation}
Denote by $u$ the column vector in $U_{t,z}$ that is not in $U_{t,z-1}$. Let $U'$ be the matrix obtained by appending the column $u$ to the matrix $U_{t,z-1}$. Let 
$$C' = (I-U'(U')^\top)Z_{t-1}Z_{t-1}^\top (I-U'(U')^\top) = (I-uu^\top) C_{t,z-1} (I-uu^\top)$$
Since $u$ is the top eigenvector of $C_{t,z-1}$ we have by equation~\eqref{eq:C t z lb} that
$$\mathrm{Trace}(C_{t,z-1}) - \mathrm{Trace}(C') = \|C_{t,z-1}\| \geq 0.4 \cdot 2^{\tau+1} \frac{\eps^2}{k}$$

If $U_{t,z-1}$ had a zero column then $C_{t,z}=C'$ and we are done. If not, let $v$ be the vector that was replaced by $u$.
According to Lemma~\ref{lem:us singular}, $v$ is a singular vector of $Z_{t-1}$. According to Lemma~\ref{lem:v has low weight} and $\eps \leq 0.1$ we have that
$$\|Z_{t-1}v\|^2 \leq \frac{2\eps^3}{k} w_{t-1} \leq \frac{\eps^2}{5k} 2^{\tau+1} \ .$$
Hence, 
$$C_{t,z} = C' + \|Z_{t-1}v\|^2 \cdot vv^\top$$
meaning that 
$$ \mathrm{Trace}(C_{t,z-1} - C_{t,z}) = \mathrm{Trace}(C_{t,z-1} -C') - \mathrm{Trace}(C_{t,z} -C')  \geq \frac{\eps^2}{5k} 2^{\tau+1} \ . $$

We conclude that as required $\Phi$ is non-decreasing over time and in each iteration of the while loop, increases by at least $\frac{\eps^2}{5k} 2^{\tau+1}$. Since $\Phi$ is upper bounded by $2^{\tau+1}$ during the discussed iterations, the lemma follows.
\end{proof}



\begin{lemma}\label{lem1stronger}
Let $(v_1,t_1'+1,t_1+1),\ldots,(v_j,t_j'+1,t_j+1),\ldots$ be the sequence of triplets of vectors removed from $U$, and the times during which they were added to $U$ and the times during which they were removed from $U$.
%Let $(v_1,t_1+1),\ldots,(v_j,t_j+1),\ldots$ be the sequence of pairs of vectors removed from $U$, and the iteration times during which they were moved.
$$\ALG_{\ell} \le \left(\|R_n\|_F+ 2\sqrt{\sum_j \|(X_{t_j}-X_{t_j'}) v_j\|^2}  \right)^2  .$$
\end{lemma}
\begin{proof}
For any time $t$ denote by $U_t$ the matrix $U$ in the end of iteration $t$, by $U_t^{(1)}$ the outcome of zeroing-out every column of $U_t$ that is different from the corresponding column in $U_n$ and by $U_t^{(2)}$ its complement, that is the outcome of zeroing-out every column in $U_t$ that is identical to the corresponding column in $U_n$. In the same way define $U^{(2)}$ to be the outcome of zeroing-out columns in $U_n$ that are all zeros in $U_t^{(2)}$.

It holds that,
\begin{eqnarray*}
\|x_t - U_n U_t^{\top}x_t\|^2 &=& \|x_t - (U_t + U_n - U_t)U_t^{\top}x_t\|^2 \\
&\leq & \left({\|x_t - U_tU_t^{\top}x_t\| + \|(U_n-U_t)U_t^{\top}x_t\|}\right)^2 \\
& = & \left({\|r_t\| + \|(U^{(2)}-U_t^{(2)})(U_t^{(2)})^{\top}x_t\|}\right)^2 \\
& \leq & \left({\|r_t\| + 2\|(U_t^{(2)})^{\top}x_t\|}\right)^2 
\end{eqnarray*}

Summing over all times $t$ we have,
\begin{eqnarray*}
\ALG_{\ell} &\leq&\sum_t\left({\|r_t\| + 2\|(U_t^{(2)})^{\top}x_t\|}\right)^2 \\
&=& \sum_t\|r_t\|^2 + 4\|r_t\|\|(U_t^{(2)})^{\top}x_t\| + 4\|(U_t^{(2)})^{\top}x_t\|^2 \\
& \leq & \|R_n\|_{\mathrm{F}}^2 + 4\sqrt{\sum_t\|r_t\|^2}\sqrt{\sum_t\|(U_t^{(2)})^{\top}x_t\|^2} + 4\sum_t\|(U_t^{(2)})^{\top}x_t\|^2
\end{eqnarray*}

Since $U_t^{(2)}$ contains only vectors that were columns of $U$ at time $t$ but were replaced later, and are not present in $U_n$, we have that $\|(U_t^{(2)})^{\top}x_t\|^2 \leq \sum_{j:t_j > t > t_j'}(x_t^{\top}v_j)^2$ and so $\sum_t\|(U_t^{(2)})^{\top}x_t\|^2 \leq \sum_j\|(X_{t_j}-X_{t_j'})v_j\|^2$. Thus we have that,

\begin{eqnarray*}
\ALG_{\ell} & \leq & \|R_n\|_{\mathrm{F}}^2 + 4\Vert{R_n}\Vert_{\mathrm{F}}\sqrt{\sum_j\|(X_{t_j}-X_{t_j'})v_j\|^2} + 4\sum_j\|(X_{t_j}-X_{t_j'})v_j\|^2 \\
&=& \left({\|R_n\|_{\mathrm{F}} + 2\sqrt{\sum_j\|(X_{t_j}-X_{t_j'})v_j\|^2}}\right)^2
\end{eqnarray*}
\end{proof} 


\begin{Lem} \label{lem:ditch loss}
Let $(v_1,t_1'+1,t_1+1),\ldots,(v_j,t_j'+1,t_j+1),\ldots$ be the sequence of triplets of vectors removed from $U$, and the times during which they were added to $U$ and the times during which they were removed from $U$.
$$\sum_j \|(X_{t_j}-X_{t_j'}) v_j\|^2 \leq 20\eps \|X_n\|_F^2  .$$
\end{Lem}
\begin{proof}
For some $\tau>0$ consider the execution of the algorithm during the period in which $w_t \in [2^{\tau},2^{\tau+1})$. According to Lemma~\ref{lem3stronger}, at most $\frac{5k}{\eps^2}$ vectors $v$ were removed from $U$ during that period. According to Lemma~\ref{lem:v has low weight}, for each such $v_j$ it holds that 
$$ \|(X_{t_j}-X_{t_j'}) v_j\|^2 \leq w_{t_j} \cdot \frac{2\eps^3}{k} \leq \frac{2\eps^3}{k} 2^{\tau+1} $$
It follows that the contribution of vectors $v$ thrown from the set during the discussed time period is at most 
$$ 5\frac{k}{\eps^2} \cdot \frac{2\eps^3 }{k} 2^{\tau+1} = 10\eps \cdot 2^{\tau+1} $$
The entire sum can now be bounded by a geometric series, ending at $\tau =  \log_2(\|X\|_F^2)$ thus proving the lemma.
\end{proof}


\begin{Cor}
$$\ALG_{k,\eps} \leq \left( \sqrt{\OPT_k} + \left(\sqrt{6}+\sqrt{20}\right) \sqrt{\eps} \|X_n\|_F \right)^2 \leq \left( \sqrt{\OPT_k} + 6.93 \sqrt{\eps} \|X_n\|_F \right)^2$$
In particular, for $\eps = \delta^2 \cdot \OPT_k/\|X_n\|_F^2$,
$$ \ALG_{k,\eps} = \OPT_k (1+O(\delta)) $$
\end{Cor}





\zk{I did not touch the time and space complexity}

\begin{lemma}[Time complexity]\label{lem5strong} 
Algorithm 1 requires $O(n \log(\mu n)d \ell )$ arithmetic operations.
\end{lemma}
\begin{proof}
It is an easy task to see that the time spent outside the while loop, not including the condition verification of the while loop is $O(d\ell)$ (Lemma~\ref{lem:sketch prop}). The verification of the while loop requires $O(d\ell^2)$ arithmetic operations. However, if we assume that each vector $x_i$ has norm $\|x_i\|^2 \leq \max\{w_0,w\}/\ell^2$, and make sure that inside the while loop, we add vectors to the sketch until $\|C\| \leq \max\{ w_0,w \}/(2\ell)$ then we can check the condition in a lazy fashion, and this will be done in no more than $1/\ell$ of the iterations. This concludes a running time of $O(nd\ell)$ not including the inner while loop.

By Lemma~\ref{lem3strong} the while loop will be activated at most \zk{get the exact number right}. Each such time we require $O(d\ell^2)$ arithmetic operations leading to a total running time of \zk{???}
\end{proof}

\begin{lemma}[Space complexity]\label{lem6strong}
 Algorithm 1 requires $O(d \ell \log(\mu n))$ space. \zk{fix bound}
\end{lemma}
\begin{proof}
It is clear that the algorithm maintains at most $O(\ell')$ column vectors of size $d$. The bound thus follows from the bound on $\ell'$ in Lemma~\ref{lem3strong}.
\end{proof}


















