\documentclass[12pt]{article}
\usepackage{fullpage,graphicx,psfrag,url,verbatim}
\usepackage[small,bf]{caption}
\setlength{\captionmargin}{30pt}

\input defs.tex
\newcommand{\sign}{\mathop{\bf sign}}

\bibliographystyle{alpha}

\title{Covariance Matrix Fusion}
\author{Manuel Gomez-Rodriguez, Robert Henriksson, Erik Rigtorp}

\begin{document}
\maketitle

% Title.
% ------

%\address{}

\abstract

We consider the problem of estimating a positive semidefinite (PSD) matrix $S$ that best fits a set of given PSD
symmetric submatrices under some convex similarity measure, and some bounds on linear functions of $S$. This can be formulated
as a convex optimization problem~\cite{BoV:04}.

The quality of estimation for every element of the matrix $S$ is considered equally important and therefore, different entry-wise norms are proposed as similarity
measures (\ie, Frobenius norm, $l_{1}$ norm)~\cite{BoLin:05}. The dual formulation is discussed under Frobenius norm assumption. Furthermore, different stochastic
formulations are explored.

One application we consider is the estimation of the covariance matrix for the daily return of a collection of
stocks given a set of estimated covariance submatrices from different sources. Performance of the estimation will be discussed for
different constraints and availability of data.

\section{The problem}

This project concerns the following problem. We are given a set of symmetric positive semidefinite (PSD) submatrices $\lbrace \hat S_i \rbrace_{i=1}^m$ and we seek $S$, the symmetric PSD matrix that has the nearest PSD submatrices $\lbrace S_i \rbrace_{i=1}^m$ to their corresponding given submatrices $\lbrace \hat S_i \rbrace_{i=1}^m$, in terms of a convex similarity measure $\phi(\cdot)$, and additionally satisfies some linear constraints. This is a convex optimization problem~\cite{BoV:04,boyd1994lmi}.

Since any real-valued linear function $f$ on the set of symmetric PSD matrices can be expressed as $f(U) = \Tr AU$ for some symmetric matrix $A$~\cite{BoLin:05}, any linear equality or inequality constraint can be expressed as
\[
\begin{array}{lll}
\Tr A_j S \leq b_j & \mbox{or} & \Tr C_k S = c_k.
\end{array}
\]

The general problem can then be formulated as
\BEQ\label{theprob1}
\begin{array}{lll}
\mbox{minimize} & \sum_{i=1}^m {w_i \phi(S_i, \hat S_i)} &\\
\mbox{subject to} & S \succeq 0 &\\
& \Tr A_j S \leq b_j, & j = 1, \ldots, p\\
& \Tr C_k S = c_k, & k = 1, \ldots, q.
\end{array}
\EEQ
Here $S$, $A_j$ and $C_k$ are $n \times n$ symmetric PSD (and real) matrices, $S$ is the optimization variable, $\lbrace \hat S_i \rbrace_{i=1}^m$, where $\hat S_i$ is an $n_i \times n_i$ ($n_i < n$) symmetric PSD matrix, are problem data and $w_i$ sets the prior confidence on $\hat S_i$. It is easy to show that for any symmetric PSD submatrix $S_i$, there is an $n \times n_i$ matrix $P_i$ such that $S_i = P_i^{T} S P_i$.

The quality of estimation for every element of the matrix $S$ is considered equally important and therefore, an entry-wise norm is adopted as similarity measure. In particular, a Frobenius norm is chosen in order to have a least-squares adjustment. Our problem can be considered an extension of the \emph{least-squares covariance adjustment problem} (LSCAP)~\cite{BoLin:05}.
Then, we can rewrite~(\ref{theprob1}) as
\BEQ\label{theprob2}
\begin{array}{lll}
\mbox{minimize} & \frac{1}{2} \sum_{i=1}^m {w_i ||P_i^{T} S P_i - \hat S_i||_{F}^2} &\\
\mbox{subject to} & S \succeq 0 &\\
& \Tr A_j S \leq b_j, & j = 1, \ldots, p\\
& \Tr C_k S = c_k, & k = 1, \ldots, q.
\end{array}
\EEQ

\section{Dual formulation}

Proceeding in a similar manner as in~\cite{BoLin:05} we derive a dual formulation of (\ref{theprob2}). We associate the Lagrange multipliers $\nu_1, \ldots, \nu_p$ with the inequality constraints, $\mu_1, \ldots, \mu_q$ with the equality constraints and the symmetric $n \times n$ matrix $Z$ with the matrix inequality $S \succeq 0$. The Lagrangian is then
\BEQ \label{lag}
    L(S,Z,\nu,\mu) = \frac{1}{2} \sum_{i=1}^m w_i \|P_i^T S P_i - \hat S_i\|_F^2 + \Tr(S(-Z+A(\nu)+C(\mu))) - \nu^T b - \mu^T c,
\EEQ
where $A(\nu)=\sum_{j=1}^p \nu_j A_j$ and $C(\mu)=\sum_{k=1}^q \mu_k C_k$. The Lagrangian is convex in $S$ and concave in $Z$, $\nu$ and $\mu$, as expected. To obtain $S$ such that the Lagrangian is minimized, we calculate the gradient of (\ref{lag}) with respect to $S$,
\BEQ
    \nabla_S L(S,Z,\nu,\mu) = \frac{1}{2} \sum^{m}_{i=1} w_{i}
    \nabla_S (\|P_i^T S P_i - \hat S_i \|_F^2) - Z + A(\nu) + C(\mu),
\EEQ and,
\BEAS \label{eq:matrixgrad}
    \nabla_S (\|P_i^T S P_i - \hat S_i \|_F^2)
    &=& \nabla_S (\Tr((P_i^T S P_i - \hat S_i)^T (P_i^T S  P_i - \hat S_i))) \\
    &=& \nabla_S (\Tr(P_i^T S P_i P_i^T S P_i - P_i^T S P_i \hat S_i - \hat S_i P_i^T S P_i + \hat S_i^T \hat S_i)) \\
    &=& 2 P_i P_i^T S P_i P_i^T - 2 P_i \hat S_i P_i^T.
\EEAS
The last step follows from basic differential
calculus~\cite{PePed:08}. Then, setting $\nabla_S L(S,Z,\nu,\mu) =
0$, we have, \BEQ \label{gradfinal}
    \nabla_S L(S,Z,\nu,\mu)=\sum^{m}_{i=1} w_{i} (P_i P_i^T S P_i P_i^T - P_i \hat S_i P_i^T) - Z + A(\nu) + C(\mu)=0.
\EEQ
We realize that $P_i P_i^T S P_i P_i^T$ sets all the elements not in $S_i$ to $0$ and $P_i \hat S_i P_i^T$ expands $\hat S_i$ to $n \times n$ while setting
the elements not in $\hat S_i$ to $0$. Then, for each element $S_{ij}$ that has a corresponding element in the set $\lbrace \hat S_k \rbrace_{k=1}^m$ we
have that,
\BEQ \label{elementwise}
    \sum_{(P_k {\bf 1}{\bf 1}^T P_k^T)_{ij} \neq 0} w_k (S_{ij} -
    (P_k \hat S_k P_k^T)_{ij}) = Z_{ij}-A(\nu)_{ij}-C(\mu)_{ij}.
\EEQ
Solving (\ref{elementwise}) for $S_{ij}$ gives,
\BEQ \label{sopt1}
    S_{ij} = \frac{1}{\sum_{(P_k {\bf 1}{\bf 1}^T P_k^T)_{ij} \neq 0} w_k}
    \left( \sum_{(P_k {\bf 1}{\bf 1}^T P_k^T)_{ij} \neq 0} w_k (P_k \hat S_k P_k^T)_{ij} +
    Z_{ij}-A(\nu)_{ij}-C(\mu)_{ij} \right).
\EEQ
For every $S_{ij}$ that does not have a corresponding element in the set $\lbrace \hat S_k \rbrace_{k=1}^m$, we have that
\BEQ \label{sopt2}
    Z_{ij} - A(\nu)_{ij} - C(\mu)_{ij} = 0.
\EEQ

In case of omitting the positive semidefinite requirement and the linear constraints, we see that for every element $S_{ij}$ that is \emph{covered} by at least
one estimate $\hat S_k$, it is optimal to simply let it be the weighted average of the respective component in the estimates $\lbrace \hat S_k \rbrace_{k=1}^m$.

Further implications of the dual formulation will be discussed in
the final report.

\section{Robust formulation}

In the stochastic formulation of the problem, $\lbrace \hat S_i
\rbrace_{i=1}^m$ is a collection of random variables with some known
distribution in the space of symmetric PSD
matrices. Under this assumption, we judge the similarity between
$S_i$ and $\hat S_i$ by the expected value of the similarity
functions,
\[
\Expect \phi(S_i, \hat S_i),
\]
where the expectation is over $\hat S_i$. The stochastic problem can
then be expressed as \BEQ\label{rf1}
\begin{array}{lll}
\mbox{minimize} & \sum_{i=1}^m {\Expect \phi(S_i, \hat S_i)} & \\
\mbox{subject to} & S \succeq 0 & \\
& \Tr A_j S \leq b_j, & j = 1, \ldots, p\\
& \Tr C_k S = c_k, & k = 1, \ldots, q.
\end{array}
\EEQ If we use a Monte Carlo approximation for the expectation, we can rewrite~(\ref{rf1}) as
\BEQ\label{rf2}
\begin{array}{lll}
\mbox{minimize} & \sum_{i=1}^m {\sum_{j=1}^{N_i} \frac{1}{N_i} \phi(S_i, \hat S_i^{(j)})} & \\
\mbox{subject to} & S \succeq 0\\
& \Tr A_j S \leq b_j, & j = 1, \ldots, p\\
& \Tr C_k S = c_k, & k = 1, \ldots, q,
\end{array}
\EEQ where $N_i$ is the number of samples drawn for every $\hat S_i$. Obviously, for any $\epsilon > 0$, there is a sufficiently large $N_i$
such that the solution for~(\ref{rf2}) is $\epsilon$-suboptimal to~(\ref{rf1}).

We can also consider the certainty equivalent of the original stochastic
formulation~(\ref{rf1}), \BEQ\label{rf3}
\begin{array}{lll}
\mbox{minimize} & \sum_{i=1}^m {\phi(S_i, \Expect \hat S_i)} &\\
\mbox{subject to} & S \succeq 0\\
& \Tr A_j S \leq b_j, & j = 1, \ldots, p\\
& \Tr C_k S = c_k, & k = 1, \ldots, q,
\end{array}
\EEQ in which the random variable in each similarity function has
been replaced by its expected value. Using Jensen's inequality,
\[
\Expect \phi(S_i, \hat S_i) \geq \phi(S_i, \Expect \hat S_i),
\]
the optimal value of~(\ref{rf3}) gives a lower bound on the optimal
value of the stochastic problem~(\ref{rf1}).

We can also model every $\hat S_i \in \lbrace \hat S_j
\rbrace_{j=1}^m$ by an unknown-but-bounded model. For every $\hat S_i$,
we are given a set $\mathcal{S}_i$ of possible values, and we judge
the similarity between $S_i$ and $\hat S_i$ by the worst value of
the convex similarity function over the set, \BEQ\label{ss3}
\begin{array}{lll}
\mbox{minimize} & \sum_{i=1}^m {\sup_{\hat S_i \in \mathcal{S}_i} \phi(S_i, \hat S_i)} &\\
\mbox{subject to} & S \succeq 0\\
& \Tr A_j S \leq b_j, & j = 1, \ldots, p\\
& \Tr C_k S = c_k, & k = 1, \ldots, q,
\end{array}
\EEQ

Finally, \emph{chance constraints} can also be introduced, which
are required to hold with some probability, \BEQ
\begin{array}{ll}
\Prob(b_j \geq \Tr A_j S) \geq \eta_b \\
\Prob(c_k = \Tr C_k S) \geq \eta_c,
\end{array}
\EEQ where $b_j$ and $c_k$ are random variables with known
distributions and $\eta_b$ and $\eta_c$ are lower bounds on the
probabilities.


\section{Applications}

\subsection{Covariance of equities}

Suppose that we are given a number of covariance matrices for different sets of stocks, and an estimate of a covariance matrix for all the stocks together is required. These matrices can come from different sources. The purpose is to estimate the correlations between stocks that did not appear together in any of the given covariance matrices.

A mathematical description of this would be the following: we are given $m$ covariance matrices $\hat \Sigma_i$ for different groups of stocks $l_i$, and the task is to estimate a covariance matrix $\Sigma$ for the full set of stocks $L=\bigcup _{i=1}^m l_i$. Also, if the sources of the covariance matrices have different reliability, then a weight $w_i$ will be assigned to each given covariance matrix, $\hat
\Sigma_i$.

This can now be formulated as the described optimization problem.
\newpage
\subsection{Test case}

For this example with 20 variables, a covariance matrix for all the
variables is generated and, from this, a number of symmetric
submatrices are extracted and used in the estimation. The basic deterministic method
as in~(\ref{theprob1}) is used to estimate the original covariance
matrix, using different numbers of the sub blocks.
\begin{figure}[htp]
\begin{center}
\includegraphics[width=0.6\textwidth]{matlabfigures/TestCase_NumOfBlocks.pdf}
\end{center}
\caption{This shows how the number of sub blocks affects the
estimation error.} \label{TestCase_NumOfBlocks}
\end{figure}

\begin{figure}[htp]
\begin{center}
\includegraphics[width=0.6\textwidth]{matlabfigures/TestCase_ImageRelativeError.pdf}
\end{center}
\caption{This shows how the number of sub blocks affects the relative
estimation error for each element.} \label{TestCase_ImageRelativeError}
\end{figure}



\begin{comment}

%Add figures side by side
\begin{figure}
\begin{minipage}[b]{0.5\linewidth} % A minipage that covers half the page
\centering
\includegraphics[width=10cm]{matlabfigures/TestCase_ImageRelativeError.pdf}
\caption{En liten bild}
\end{minipage}
\hspace{0.5cm} % To get a little bit of space between the figures
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=8cm]{matlabfigures/TestCase_NumOfBlocks.pdf}
\caption{En liten bild till}
\end{minipage}
\end{figure}


To implement and try the method, a test case is generated with $300$
samples from a set of $20$ gaussian variables with given covariance
$\hat \Sigma$. Then $10$ sub blocks with different combinations of
these variables are estimated.

\subsection{Deterministic formulation}

In this case, the covariance $\hat \Sigma$ is estimated with
different numbers of sub blocks and different levels of noise using
the deterministic formulation from ~(\ref{theprob1}), see
figure~\ref{TestCaseDeterministic}.

\begin{figure}[htp]
\begin{center}
\includegraphics[width=0.4\textwidth]{matlabfigures/TestCaseDeterministic}
\end{center}
\caption{This shows how the number of sub blocks in the estimation
and the number of samples affect the estimation error}
\label{TestCaseDeterministic}
\end{figure}

\subsection{Robust formulation}

In this case, $10$ versions of the covariance matrices are chosen,
with independent gaussian noise on each of them, in order to give a
rough estimation of the expected value. The covariance $\hat \Sigma$
is then estimated with different numbers of sub blocks and samples,
using the robust formulation of the expected value as
in~(\ref{rf2}). See figure~\ref{TestCase_ExpectedValue}.

\begin{figure}[htp]
\begin{center}
\includegraphics[width=0.4\textwidth]{matlabfigures/TestCase_ExpectedValue}
\end{center}
\caption{This illustrates how well different levels of noise is
rejected, depending on the number of sub blocks in the estimation of
the covariance matrix, and the number of samples}
\label{TestCase_ExpectedValue}
\end{figure}



\begin{figure}[htp]
\begin{center}
\includegraphics[width=0.4\textwidth]{matlabfigures/TestCase_Deterministic}
\end{center}
\end{figure}

\newpage
\appendix


\section{Projection onto the probability simplex}
    Consider the problem of projecting $x \in R^n$ onto the probability
    simplex, in Euclidean sense. This problem can be formulated as
    the following optimization problem
    \BEQ\label{form}
        \begin{array}{ll}
        \mbox{minimize} & \|x-y\|_2^2  \\
        \mbox{subject to} & y \ge 0 \quad {\bf 1}^T y =1
        \end{array}
    \EEQ
    which can be solved using the dual of the problem.
    The optimal $y$ can be shown to be $y^*=(x-\nu^*{\bf 1})_+$,
    where $\nu^*$ is the optimal dual variable corresponding to the
    constraint ${\bf 1}^T y =1$. The optimal dual variable, $\nu^*$, can be derived using
    the following method:
    For $k=0, \ldots, n-1$ set
    \[
    \nu_k = \frac{1}{n-k} \left( \sum_{i=k+1}^n \tilde x_i - 1 \right),
    \]
    where $\tilde x$ denotes the entries of $x$ sorted in ascending order, and
    take $\nu^*$ to be the $\nu_k$ for which $y = (x - \nu_k {\bf 1})_+$
    satisfies ${\bf 1}^T y = 1$.

\section{Similarity measures}

    When solving the optimization problem, different norms can be
    used as similarity functions - each norm with different interpretations of the resulting
    covariance estimation.
\subsection{Frobenius norm}
    \[
    \|S - \hat S \|_{F}^2  = \| E \|_F^2 = \sum_{i=1}^n \lambda_i^2(E)
    \]
    is equivalent to applying the $\ell_2$-norm to
    the eigenvalues of the matrix. Thus, the penalty puts small
    weight on small eigenvalues and significant weight on large
    eigenvalues. This will result in the eigenvalues clustering around
    zero~\cite{BoV:04}.
    Another interpretation of the Frobenius norm can also be derived
    from the alternate expression,
    \[
    \| E \|_F^2 = \sum_{i=1}^n \sum_{j=1}^n e_{i,j}^2
    \]
    which corresponds to an element-wise $\ell_2$-norm
    penalty, and thus the differences will be clustered around zero~\cite{BoV:04}.

\subsection{Entry-wise $\ell_1$-norm}
    \[
    \|S - \hat S \|_1  = \| E \|_1 = \sum_{i=1}^n \sum_{j=1}^n | e_{i,j} |
    \]
    is equivalent to an $\ell_1$-norm penalty function on the elements. This penalty
    function will cause a sparsity pattern in the error matrix $E = S - \hat S$ and thus cause
    some covariances to be exactly estimated and some to be spread out on a wide range around zero~\cite{BoV:04}.

\subsection{Spectral norm}
    \[
    \|S - \hat S \|_2^2  = \| E \|_2^2 = \max_{i=1,\ldots,n} \lambda_i^2(E)
    \]
    puts no weight on eigenvalues below the maximum. Thus the eigenvalues should
    be
    clustered around a fixed distance from the origin, making the maximum eigenvalue small.
    \newline

\subsection{Trace norm}
    \[
    \|S - \hat S \|_{tr}  = \| E \|_{tr} = \sum_{i=1}^n |\lambda_i(E)|
    \]
    is equivalent to an $\ell_1$-norm penalty function on the eigenvalues. This penalty
    function should thus cause a sparsity pattern in the eigenvalues with a portion of the
    eigenvalues exactly zero and the others distributed over a wide
    range around zero~\cite{BoV:04}.

\subsection{Factor modeling}

Suppose we are given a sample covariance matrix $\Sigma$ for a
random process and we want to find an approximation of the form
$\hat \Sigma = FF^T + D$ with $F \in R^{n \times r}$ and $D$
diagonal with nonnegative entries.

   \BEQ\label{form}
        \begin{array}{ll}
        \mbox{minimize} & \Phi(\Sigma, S)\\
        \mbox{subject to} &  S \succeq 0 \\
        &S = FF^T + D\\
        \end{array}
    \EEQ

This is not a convex problem but it can be relaxed into one.
Consider the problem again, we want to find a matrix $X$ that is low
rank such that $S=X+D$. By minimizing $\Rank(X)$ subject to the
constraint $\Phi(\Sigma, X + D) \leq \epsilon$ we find the model
with fewest factors that is within distance $\epsilon$ from $\Sigma$
under the measure $\Phi(\cdot)$.

This objective is not convex, but it has a convex relaxation. The
trace norm is the sum of the singular values, $\|X\|_{T}
=\sum_{i=1}^{\min\{m,\,n\}} \sigma_{i}$. Minimizing the trace norm
will minimize $\Rank(X)$.

    \BEQ\label{form}
        \begin{array}{ll}
        \mbox{minimize} & \|X\|_T\\
        \mbox{subject to} &  X \succeq 0, \; D \succeq 0 \mbox{ diagonal} \\
        &\Phi(\Sigma, X + D) \leq \epsilon\\
        \end{array}
    \EEQ

\section{Optimal weights}
    When blending the submatrices $\hat S_i$ the problem with having
    different reliability of the sources arises. This problem can be
    handled with putting different weights $w$ on each submatrix in the
    objective function,

    \BEQ\label{form}
        \begin{array}{ll}
        \mbox{minimize} & f = \sum_{i=1}^m {w_i \phi(S_i, \hat S_i)} \\
        \mbox{subject to} & S \succeq 0 \quad S \in C\\
        & w \ge 0 \quad {\bf 1}^T w =1
        \end{array}
    \EEQ
    and with this formulation, one way to find optimal weights can be derived with
    a projected subgradient method based on solving~(\ref{theprob2}) with
    fixed $w$, and then updating $w$ and solving~(\ref{theprob2}) again. This can be formulated as the convex optimization problem

    \BEQ\label{form}
        \begin{array}{ll}
        \mbox{minimize} & f(w) = \inf _{S \in C } \sum_{i=1}^m {w_i \phi(S_i, \hat S_i)} \\
        \mbox{subject to} & w \ge 0 \quad {\bf 1}^T w =1 \\
        &C=\{S: S \succeq 0 \quad, |S_{ij}| \leq \sigma_{max}^2\}

        \end{array}
    \EEQ
    This problem is a non-differentiable convex problem which can be
    solved with a projected sub-gradient method according to the
    algorithm below.\newline \newline
    {\bf given} $k=0, w_i^k = {1 \over m}$ \newline
    {\bf repeat}
    \begin{enumerate}
    \item Solve~(\ref{theprob2}) using $w=w^k$.
    \item Update $w: w^{k+1}= P_p(w^k - \alpha_k \nabla_w f(w^k)).$
    \item Set $k:=k+1$.
    \end{enumerate}
    {\bf until} $k=K_0$ \newline \newline
    Where $P_p$ is the Euclidean projection on the probability simplex, to make
    sure that $w^{k+1}$ is feasible; see Appendix A. And $\alpha_k$ is a step size
    variable and $K_0$ is a fixed number of iterations. The subgradient of $f$ with respect to $w$ is easily derived
    to be $(\nabla_w f(w))_i = \phi(S_i, \hat S_i) $.
    \newline
    Since it is hard to derive a good stopping criterion, a fixed
    number of iterations will be used.


Also a case where noise is added to the samples is tried. The
performance of the estimation is tested for different levels of
noise and different numbers of sub blocks, see
figure~\ref{TestCaseDeterministicWithNoise}

\begin{figure}[htp]
\begin{center}
\includegraphics[width=0.4\textwidth]{matlabfigures/TestCaseDeterministicWithNoise}
\end{center}
\caption{This shows how the number of sub blocks in the estimation
and the number of samples affect the estimation error}
\label{TestCaseDeterministicWithNoise}
\end{figure}



\end{comment}

\newpage
\bibliography{milestone_refs}

\end{document}
