\documentclass[12pt]{article}
\usepackage{fullpage,graphicx,psfrag,url,verbatim}
\usepackage[small,bf]{caption}
\usepackage{subfigure}
\setlength{\captionmargin}{30pt}

\input defs.tex
\newcommand{\sign}{\mathop{\bf sign}}

\bibliographystyle{alpha}

\title{Covariance Matrix Fusion}
\author{Manuel Gomez-Rodriguez, Robert Henriksson, Erik Rigtorp}

\begin{document}
\maketitle

% Title.
% ------

%\address{}

\begin{abstract}
We consider the problem of estimating a positive semidefinite (PSD)
matrix $S$ that best fits a set of given PSD symmetric submatrices
under some convex similarity measure, and some linear bounds on $S$.
This can be formulated as a convex optimization
problem~\cite{BoV:04}.

The quality of estimation for every element of the matrix $S$ is
considered equally important and therefore, different entry-wise
norms are proposed as similarity measures (\eg, the Frobenius norm
or the $l_{1}$ norm)~\cite{BoLin:05}. The dual formulation is
discussed using the Frobenius norm. Furthermore, different
stochastic formulations are explored.

One application we consider is the estimation of the covariance matrix for the daily return of a collection of
stocks given a set of estimated covariance submatrices from different sources. Performance of the estimation will be discussed for
different constraints and availability of data.
\end{abstract}

\section{The problem}

This project concerns the following problem: we are given a set
of symmetric positive semidefinite (PSD) submatrices $\lbrace
\hat S_i \rbrace_{i=1}^m$ and we seek $S$, the symmetric PSD
matrix that has the nearest PSD submatrices $\lbrace S_i
\rbrace_{i=1}^m$ to their corresponding given submatrices
$\lbrace \hat S_i \rbrace_{i=1}^m$, in terms of a convex
similarity measure $\phi(\cdot)$, and additionally satisfies
some linear constraints. This is a convex optimization
problem~\cite{boyd1994lmi}.

Since any real-valued linear function $f$ on the set of symmetric
PSD matrices can be expressed as $f(U) = \Tr AU$ for some symmetric
matrix A,~\cite{BoLin:05}, any affine equality or inequality
constraints can be expressed as
\[
\begin{array}{lll}
\Tr A_j S \leq b_j & \mbox{or} & \Tr C_k S = c_k.
\end{array}
\]

The general problem can then be formulated as
\BEQ\label{theprob1}
\begin{array}{lll}
\mbox{minimize} & \sum_{i=1}^m {w_i \phi(S_i, \hat S_i)} &\\
\mbox{subject to} & S \succeq 0 &\\
& \Tr A_j S \leq b_j, & j = 1 \ldots p\\
& \Tr C_k S = c_k, & k = 1 \ldots q.
\end{array}
\EEQ Here $S$, $A_j$ and $C_k$ are $n \times n$ symmetric
(and real) matrices, $S$ is the optimization variable, $\lbrace
\hat S_i \rbrace_{i=1}^m$, where $\hat S_i$ is an $n_i \times
n_i$ ($n_i < n$) symmetric PSD matrix, $\hat S_i$ is problem
data and $w_i$ sets the prior confidence on $\hat S_i$. It is
easy to show that for any symmetric PSD submatrix $S_i$, there
is an $n \times n_i$ matrix $P_i$ such that $S_i = P_i^{T} S
P_i$.

The quality of estimation for every element of the matrix $S$ is
considered equally important and therefore, an entry-wise norm is
adopted as similarity measure. In particular, a Frobenius norm is
chosen in order to have a least-squares adjustment. Our problem can
be considered an extension of the \emph{least-squares covariance
adjustment problem} (LSCAP)~\cite{BoLin:05}. Then, we can
rewrite~(\ref{theprob1}) as \BEQ\label{theprob2}
\begin{array}{lll}
\mbox{minimize} & \frac{1}{2} \sum_{i=1}^m {w_i ||P_i^{T} S P_i - \hat S_i||_{F}^2} &\\
\mbox{subject to} & S \succeq 0 &\\
& \Tr A_j S \leq b_j, & j = 1 \ldots p\\
& \Tr C_k S = c_k, & k = 1 \ldots q.
\end{array}
\EEQ

\subsection{Dual formulation}

Proceeding in a similar manner as in~\cite{BoLin:05}, we derive a
dual formulation of (\ref{theprob2}). We associate the Lagrange
multipliers $\nu_1, \ldots, \nu_p$ with the inequality constraints;
$\mu_1, \ldots, \mu_q$ with the equality constraints; and the
symmetric $n \times n$ matrix $Z$ with the matrix inequality $S
\succeq 0$. The Lagrangian is then \BEQ \label{lag}
    L(S,Z,\nu,\mu) = \frac{1}{2} \sum_{i=1}^m w_i \|P_i^T S P_i - \hat S_i\|_F^2 + \Tr(S(-Z+A(\nu)+C(\mu))) - \nu^T b - \mu^T c,
\EEQ where $A(\nu)=\sum_{j=1}^p \nu_j A_j$ and $C(\mu)=\sum_{k=1}^q
\mu_k C_k$. The Lagrangian is convex in $S$ and concave in $Z$,
$\nu$ and $\mu$, as expected. To obtain $S$ such that the Lagrangian
is minimized, we calculate the gradient of (\ref{lag}) with respect
to $S$, \BEAS
    \nabla_S L(S,Z,\nu,\mu) = \frac{1}{2} \sum^{m}_{i=1} w_{i}
    \nabla_S (\|P_i^T S P_i - \hat S_i \|_F^2) - Z + A(\nu) + C(\mu),
\EEAS
and, \BEAS \label{eq:matrixgrad}
    \nabla_S (\|P_i^T S P_i - \hat S_i \|_F^2) &=& \nabla_S (\Tr((P_i^T S P_i - \hat S_i)^T (P_i^T S P_i - \hat S_i))) \\
    &=& \nabla_S (\Tr(P_i^T S P_i P_i^T S P_i - P_i^T S P_i \hat S_i - \hat S_i P_i^T S P_i + \hat S_i^T \hat S_i)) \\
    &=& 2 P_i P_i^T S P_i P_i^T - 2 P_i \hat S_i P_i^T.
\EEAS
The last step follows from basic differential
calculus~\cite{PePed:08}. Then, setting $\nabla_S L(S,Z,\nu,\mu) =
0$, we have, \BEQ \label{gradfinal}
    \nabla_S L(S,Z,\nu,\mu)=\sum^{m}_{i=1} w_{i} (P_i P_i^T S P_i P_i^T - P_i \hat S_i P_i^T) - Z + A(\nu) + C(\mu)=0.
\EEQ We note that $P_i P_i^T S P_i P_i^T$ sets all the elements not
in $S_i$ to $0$ and $P_i \hat S_i P_i^T$ expands $\hat S_i$ to $n
\times n$ while setting the elements not in $\hat S_i$ to $0$. Then,
for each element $S_{ij}$ that has a corresponding element in the
set $\lbrace \hat S_k \rbrace_{k=1}^m$ we have that,
\BEAS
    \sum_{(P_k 11^T P_k^T)_{ij} \neq 0} w_k (S_{ij} -
    (P_k \hat S_k P_k^T)_{ij}) = Z_{ij}-A(\nu)_{ij}-C(\mu)_{ij}.
\EEAS Solving this for $S_{ij}$ gives,
\BEAS
    S_{ij} = \frac{1}{\sum_{(P_k 11^T P_k^T)_{ij} \neq 0} w_k}
    \left( \sum_{(P_k 11^T P_k^T)_{ij} \neq 0} w_k (P_k \hat S_k P_k^T)_{ij} +
    Z_{ij}-A(\nu)_{ij}-C(\mu)_{ij} \right).
\EEAS For every $S_{ij}$ that does not have a corresponding element
in the set $\lbrace \hat S_k \rbrace_{k=1}^m$, we have that \BEAS
    Z_{ij} - A(\nu)_{ij} - C(\mu)_{ij} = 0.
\EEAS

In case of omitting the positive semidefinite requirement and the
linear constraints, for every element $S_{ij}$ that is
\emph{covered} by at least one estimate $\hat S_k$, it is optimal to
simply let it be the weighted average of the respective component in
the estimates $\lbrace \hat S_k \rbrace_{k=1}^m$.

We can also note that any element that does not have any
corresponding estimate in $\lbrace \hat S_k \rbrace_{k=1}^m$ is
constrained only by the condition of positive semidefiniteness
and the affine constraints.

\begin{figure*}[h!]
\centering
\subfigure[Number of subblocks vs.\ estimation
error]{\includegraphics[width=0.4\textwidth]{matlabfigures/Testcase_NumOfBlocks.pdf}
\label{TestCase_NumOfBlocks}} \hfil \subfigure[Level of noise vs.\
estimation
error]{\includegraphics[width=0.4\textwidth]{matlabfigures/TestCaseWithNoise.pdf}
\label{TestCaseWithNoise}}
\caption{Evolution of estimation error with respect to the number of
subblocks and level of noise.}
\label{fig:results}
\end{figure*}

\begin{figure}[h!]
\centering
\includegraphics[width=\textwidth]{matlabfigures/matrixImage.png}
\caption{This shows how the number of subblocks affects the relative
estimation error for each element.}
\label{TestCase_ImageRelativeError}

\section{Applications}

Suppose that we are given a number of covariance matrices for
different sets of stocks, and an estimate of a covariance
matrix for all the stocks together is sought. These matrices
can come from different sources.

A mathematical description of this would be the following: we are
given $m$ covariance matrices $\hat \Sigma_i$ for different
groups of stocks $l_i$, $i=1, \ldots, m$. The task is to estimate
a covariance matrix $\Sigma$ for the full set of stocks
$L=\bigcup_{i=1}^m l_i$. Also, if the sources of the covariance
matrices have different reliability, then a weight $w_i$ will
be assigned to each given covariance matrix, $\hat \Sigma_i$.
This can now be formulated as an optimization problem as
in~(\ref{theprob2}).

For this example, the covariances between different sets of stocks
are given, from a total of $20$ stocks. The basic deterministic
method as in~(\ref{theprob2}) is used to estimate the original
covariance matrix, using a different number of subblocks. As can be
seen in figure~\ref{TestCase_NumOfBlocks} the estimation error
decreases in general. But as
figure~\ref{TestCase_ImageRelativeError} shows, the estimation is still
inaccurate for the regions not covered by any subblock. This occurs
since the only information available for those regions is the
constraint of the full matrix to be positive semidefinite.

Figure~\ref{TestCaseWithNoise} shows how noise affects the
estimation. Each subblock is now given as a PSD matrix with noise
added to the original submatrices. The figure shows that the noise
rejection is more effective when more blocks are used, which is to
be expected when independent zero mean noise is used.

\section{Extensions}
\subsection{Robust formulation}

In the stochastic formulation of the problem, $\lbrace \hat S_i
\rbrace_{i=1}^m$ is a collection of random variables with some
known distribution in the space of symmetric PSD matrices. In
this way, we model uncertainty in the estimation of the
submatrices that are given. Under this assumption, we judge the
similarity between $S_i$ and $\hat S_i$ by the expected value
of the similarity functions,
\[
\Expect \phi(S_i, \hat S_i),
\]
where the expectation is over $\hat S_i$. The stochastic problem can
then be expressed as \BEQ\label{rf1}
\begin{array}{lll}
\mbox{minimize} & \sum_{i=1}^m {\Expect \phi(S_i, \hat S_i)} & \\
\mbox{subject to} & S \succeq 0 & \\
& \Tr A_j S \leq b_j, & j = 1 \ldots p\\
& \Tr C_k S = c_k, & k = 1 \ldots q.
\end{array}
\EEQ If we use a Monte Carlo approximation for the expectation, we
can rewrite~(\ref{rf1}) as \BEQ\label{rf2}
\begin{array}{lll}
\mbox{minimize} & \sum_{i=1}^m {\frac{1}{N_i} \sum_{l=1}^{N_i} \phi(S_i, \hat S_i^{(l)})} & \\
\mbox{subject to} & S \succeq 0\\
& \Tr A_j S \leq b_j, & j = 1 \ldots p\\
& \Tr C_k S = c_k, & k = 1 \ldots q,
\end{array}
\EEQ where $N_i$ is the number of samples drawn for every $\hat
S_i$. An improvement in the accuracy is expected if we increase
$N_i$.

We can also consider the certainty equivalent, in which the random
variable in each similarity function has been replaced by its
expected value, of the original stochastic formulation~(\ref{rf1}),
\BEQ\label{rf3}
\begin{array}{lll}
\mbox{minimize} & \sum_{i=1}^m {\phi(S_i, \Expect \hat S_i)} &\\
\mbox{subject to} & S \succeq 0\\
& \Tr A_j S \leq b_j, & j = 1 \ldots p\\
& \Tr C_k S = c_k, & k = 1 \ldots q,
\end{array}
\EEQ Using Jensen's inequality,
\[
\Expect \phi(S_i, \hat S_i) \geq \phi(S_i, \Expect \hat S_i),
\]
the optimal value of~(\ref{rf3}) gives a lower bound on the optimal
value of the stochastic problem~(\ref{rf1}).

We can also model every $\hat S_i \in \lbrace \hat S_j
\rbrace_{j=1}^m$ by an unknown-but-bounded model~\cite{SkBo}. For
every $\hat S_i$, we are given a set $\mathcal{M}_i$ of possible values,
and we judge the similarity between $S_i$ and $\hat S_i$ by the
worst value of the convex similarity function over the set,
\BEAS\label{ss3}
\begin{array}{lll}
\mbox{minimize} & \sum_{i=1}^m {\sup_{\hat S_i \in \mathcal{M}_i} \phi(S_i, \hat S_i)} &\\
\mbox{subject to} & S \succeq 0\\
& \Tr A_j S \leq b_j, & j = 1 \ldots p\\
& \Tr C_k S = c_k, & k = 1 \ldots q,
\end{array}
\EEAS

Finally, \emph{chance constraints} can also be introduced as
follows, \BEAS
\begin{array}{ll}
\Prob(b_i \geq \Tr A_i S) \geq \eta_b \\
\Prob(c_i = \Tr C_i S) \geq \eta_c,
\end{array}
\EEAS where $b_i$, and $c_i$ are random variables with known
distributions and $\eta_b$ and $\eta_c$ are lower bounds in the
probabilities.

\subsection{Additional constraints}

One way to attempt to reach a more accurate solution is to add
constraints based on the variances given. We know that
\BEQ
\begin{array}{ll} \label{cs}
(\Expect(XY))^2 \leq \Expect(X^2)\Expect(Y^2)
\end{array}
\EEQ must hold by the Cauchy-Schwarz inequality. Thus, if we
make the assumption that the given variance of each stock is
accurate to $\rho\mbox{ \%}$ and we apply (\ref{cs}), we can
bound each element of $S$ as follows,
\BEQ\label{theprob1AddConstr} |S_{ij}| \leq (1+\rho)\sqrt{\hat
S_{ii} \hat S_{jj}} \quad i,j = 1 \ldots n \EEQ

If there are inconsistencies in the variance of the given
submatrices, an average for each variance can instead be used. It is
important to note that this constraint is implied by $S \succeq
0$ if $\hat S_{ii} = S_{ii}$, $i = 1 \ldots n$, and $\rho=0$.

\section*{Acknowledgment}

We would like to thank Jo\"elle Skaf for suggesting the problem to us.

\bibliography{final_report_refs}

\end{document}
