\documentclass[dvips,landscape]{foils}
\usepackage{graphicx,psfrag}
\input defs.tex
\raggedright \special{! TeXDict begin /landplus90{true}store end }
\renewcommand{\oursection}[1]{ \foilhead[-1.0cm]{#1} }

\title{Covariance Matrix Fusion}
\author{}
\MyLogo{Manuel Gomez-Rodriguez, Robert Henriksson, Erik Rigtorp,
EE364b, Stanford University}
\date{}

\begin{document}
\setlength{\parskip}{0cm}
\maketitle


\begin{center}
\textbf{Manuel Gomez-Rodriguez, Robert Henriksson, Erik Rigtorp}
\end{center}
\vfill
\begin{center}
\textbf{Department of Electrical Engineering}
\end{center}

\begin{center}
\textbf{Stanford University}
\end{center}

\normalsize


\oursection{General problem formulation}

\BIT
\item
This project analyses the problem of estimating a covariance matrix
when given a subset of its symmetric subblocks.

\item A basic mathematical formulation is
\BEAS\label{theprob1}
\begin{array}{lll}
\mbox{minimize} & \sum_{i=1}^m {w_i \phi(S_i, \hat S_i)} &\\
\mbox{subject to} & S \succeq 0 &\\
& \Tr A_j S \leq b_j, & j = 1 \ldots p\\
& \Tr C_k S = c_k, & k = 1 \ldots q.
\end{array}
\EEAS
\item
Here $S$, $A_j$ and $C_k$ are $n \times n$ symmetric PSD (and real)
matrices; $S$ is the optimization variable. The $\hat S_i$ are given
symmetric PSD matrices with dimensions $n_i \times n_i$ ($n_i < n$),
and $w_i$ sets the prior confidence on $\hat S_i$.

\EIT
\newpage


\oursection{Problem formulation}

\BIT
\item
As a similarity measure, the Frobenius norm is chosen. This
corresponds to a least-squares objective for each element. \BEAS\
\begin{array}{lll}
\mbox{minimize} & \frac{1}{2} \sum_{i=1}^m {w_i \|P_i^{T} S P_i - \hat S_i\|_{F}^2} &\\
\mbox{subject to} & S \succeq 0 &\\
& \Tr A_j S \leq b_j, & j = 1 \ldots p\\
& \Tr C_k S = c_k, & k = 1 \ldots q.
\end{array}
\EEAS
\item It is easy to show that for any symmetric PSD submatrix
$S_i$, there is an $n \times n_i$ matrix $P_i$ such that $S_i =
P_i^{T} S P_i$.

\EIT
\newpage

\oursection{Dual formulation}
\BIT
\item
The Lagrangian can be derived as  \BEAS L = \frac{1}{2} \sum_{i=1}^m
w_i \|P_i^T S P_i - \hat S_i\|_F^2 + \Tr(S(-Z+A(\nu)+C(\mu))) -
\nu^T b - \mu^T
    c, \EEAS
     where $A(\nu)= \sum_{j=1}^p \nu_j A_j$ and $C(\mu)=\sum_{k=1}^q
\mu_k C_k$.

\item
The KKT-conditions give \small \BEAS
    S_{ij} = \frac{1}{\sum_{(P_k 11^T P_k^T)_{ij} \neq 0} w_k}
    \left( \sum_{(P_k 11^T P_k^T)_{ij} \neq 0} w_k (P_k \hat S_k P_k^T)_{ij} +
    Z_{ij}-A(\nu)_{ij}-C(\mu)_{ij} \right).
\EEAS
\EIT
\newpage


\oursection{Dual formulation} \BIT
\item
\normalsize From the KKT-conditions we can derive that for all
elements $S_{ij}$ that are not covered by any of the given
submatrices the following must hold
 \BEAS
    Z_{ij} - A(\nu)_{ij} - C(\mu)_{ij} = 0,
\EEAS
\item
In case of omitting the positive semidefinite requirement and the
linear constraints, for every element $S_{ij}$ that is
\emph{covered} by at least one estimate $\hat S_k$, it is optimal to
simply let it be the weighted average of the respective component in
the estimates $\lbrace \hat S_k \rbrace_{k=1}^m$.
\EIT
\newpage

\oursection{Test case} \BIT
\item In order to test the basic formulation we have generated a test case
where we start with a $20 \times 20$ matrix $S^\star$, derive
subblocks from it, and try to estimate $S^\star$ from the blocks:
\BIT
\item Optimization variable $S \in \reals^{20 \times 20}$.
\item Given data $\hat S_i \in \reals^{8 \times 8}$ symmetric subblocks of $S^\star$
\item 3--10 subblocks, $\hat S_i$, are used in the estimations
\EIT
\item Different instances are generated\BIT
\item Perfect estimates of the subblocks: $\hat S_i = P_i^{T} S^\star P_i$
\item Noisy subblocks $\hat S_i \neq P_i^{T} S^\star P_i$
\EIT
\EIT
\vfill


\oursection{Results} \BIT
\item
The estimation error $\|S-S^\star\|_F$ goes down as the number of
subblocks used increases
\begin{center}
\includegraphics[width=0.6\textwidth]{matlabfigures/Testcase_NumOfBlocks.eps}
\end{center}
\EIT
 \oursection{Results} \BIT
\item The estimation for areas not covered by any
    subblock
    is still inaccurate
\begin{center}
\includegraphics[width=1\textwidth]{matlabfigures/TestCase_ImageRelativeErrorEPS.eps}
\end{center}
\EIT


\oursection{Results} \BIT
\item Noise is rejected more effectively when several subblocks are used
in the estimation
\begin{center}
\includegraphics[width=0.6\textwidth]{matlabfigures/TestCaseWithNoise.eps}
\end{center}
\EIT


\oursection{Stochastic formulation} \BIT
\item A stochastic formulation can also be adopted when there
    is uncertainty in the data. A basic formulation is
    \BEAS
\begin{array}{lll}
\mbox{minimize} & \sum_{i=1}^m {\Expect \phi(S_i, \hat S_i)} & \\
\mbox{subject to} & S \succeq 0 & \\
& \Tr A_j S \leq b_j, & j = 1 \ldots p\\
& \Tr C_k S = c_k, & k = 1 \ldots q.
\end{array}
\EEAS \EIT

\oursection{Stochastic formulation} \BIT
\item We can also model every $\hat S_i \in \lbrace \hat S_j
    \rbrace_{j=1}^m$ by an unknown-but-bounded
    model. For every $\hat S_i$, we are given a set
    $\mathcal{M}_i$ of possible values, and we judge the
    similarity between $S_i$ and $\hat S_i$ by the worst value
    of the convex similarity function over the set,
    \BEAS\label{ss3}
\begin{array}{lll}
\mbox{minimize} & \sum_{i=1}^m {\sup_{\hat S_i \in \mathcal{M}_i} \phi(S_i, \hat S_i)} &\\
\mbox{subject to} & S \succeq 0\\
& \Tr A_j S \leq b_j, & j = 1 \ldots p\\
& \Tr C_k S = c_k, & k = 1 \ldots q,
\end{array}
\EEAS


\EIT


\oursection{Additional constraints} \BIT
\item
One way to attempt to reach a more accurate solution is to add
constraints based on the variances given. We know that \BEAS
\begin{array}{ll} \label{cs}
(\Expect(XY))^2 \leq \Expect(X^2)\Expect(Y^2)
\end{array}
\EEAS must hold, so if we assume that the given variance of each
stock is accurate to $\rho\mbox{ \%}$, we can bound each element of
$S$.
\BEAS\label{theprob1AddConstr} |S_{ij}| \leq (1+\rho)\sqrt{\hat
S_{ii} \hat S_{jj}} \quad i,j = 1 \ldots n \EEAS

\item
It is important to note that this constraint is implied by $S
\succeq 0$ if $\hat S_{ii} = S_{ii} \quad i = 1 \ldots n$  and
$\rho=0$.

\EIT



\end{document}
