%%%%%%%%%%% INCLUDE THE PREAMBLE
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\input{Preambel}

\title{Summary of AE4E95-11 Satellite Signals and Data Processing}
\author{Ingo Gerth, \today}
\date{}

%%%%%%%%%%% BEGINNING OF THE DOCUMENT
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{document}

\maketitle

\begin{multicols}{2}

\section*{Symbols and Definitions}
Let's start slowly\dots

$y$ -- observation vector\\
$A$ -- model, or design matrix\\
$x$ -- vector of unknowns\\
$e$ -- residual or error vector

$\hat{x}$ -- least squares estimate\\
$\hat{e}$ -- least squares residuals


$\underline{a}$ -- stochastic vector\\
$\underline{\alpha}$ -- stochastic variable\\
$\alpha$ -- stochastic value, or \emph{sample}

New concept: the mean, or rather the \emph{mathematical expectation} of a stochastic variable.
\[M=E\lbrace \underline{\alpha} \rbrace\]
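E.g.\ for a fair die: $E\lbrace \underline{\alpha} \rbrace = \tfrac{1}{6}(1+2+\cdots+6)=3.5$.
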
The following relations hold for a linear transformation:
\begin{align*}
 \underline{f}&=F\underline{a}\\
 E\lbrace \underline{f} \rbrace& = F E\lbrace \underline{a} \rbrace
\end{align*}
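A quick sanity check with made-up numbers: take $E\lbrace \underline{a} \rbrace = \begin{bmatrix}1 & 2\end{bmatrix}^T$ and $F=\begin{bmatrix}1 & 1\end{bmatrix}$, so that $\underline{f}=\underline{\alpha}+\underline{\beta}$ and
\begin{align*}
E\lbrace \underline{f} \rbrace = F E\lbrace \underline{a} \rbrace = 1+2 = 3
\end{align*}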

Dispersion
\begin{align*}
	&D \lbrace \underline{a} \rbrace = \underline{Q}_a\\
	&\underline{Q}_a=\begin{bmatrix}
	\sigma_\alpha^2 		& \sigma_{\alpha\beta} 	& \cdots \\
	\sigma_{\alpha\beta} 	& \sigma_\beta^2		& \\
	\vdots					&						& \ddots
	\end{bmatrix}\\
	& D\lbrace\underline{f}\rbrace = \underbrace{F D\lbrace\underline{a}\rbrace F^T}_{\text{must be square}}
\end{align*}
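For example, with the same made-up $F=\begin{bmatrix}1 & 1\end{bmatrix}$ and a $2\times 2$ $\underline{Q}_a$, the propagation law reproduces the familiar variance of a sum:
\begin{align*}
D\lbrace \underline{f} \rbrace = \begin{bmatrix}1 & 1\end{bmatrix}
\begin{bmatrix}\sigma_\alpha^2 & \sigma_{\alpha\beta}\\ \sigma_{\alpha\beta} & \sigma_\beta^2\end{bmatrix}
\begin{bmatrix}1\\1\end{bmatrix}
= \sigma_\alpha^2 + 2\sigma_{\alpha\beta} + \sigma_\beta^2
\end{align*}
a $1\times 1$ result, square as required.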

Must-know linear algebra relations:
\begin{align*}
 &(A-B)^T =A^T-B^T\\
 &(AB)^T=B^TA^T\\
 &(A^TB^TC)^T=C^TBA \quad \text{(apply $(AB)^T=B^TA^T$ twice)}
\end{align*}

\section{A simple matrix equation}

\begin{align}
	\underbrace{y}_{m\times1}=\underbrace{A}_{m\times n}\underbrace{x}_{n\times 1}
\end{align}
Given $A,x$: a simple forward problem\\
Given $A,y$: $x$ has to be \emph{estimated}\\
Given $y,x$: $A$ has to be found by regression, often a calibration problem.
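For instance, a classic setup of this kind: fitting a straight line $y_i = x_1 + x_2 t_i + e_i$ to measurements taken at epochs $t_i$ gives
\begin{align*}
A=\begin{bmatrix}1 & t_1\\ \vdots & \vdots\\ 1 & t_m\end{bmatrix},\qquad x=\begin{bmatrix}x_1\\x_2\end{bmatrix}
\end{align*}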

\section{Least squares}

Observation data can be collected in a measurement vector $y$. However, these data will contain errors. Therefore, with a model $A$, the unknowns $x$ can be estimated if the error $e$ is taken into account:
\begin{align}
	y=Ax+e
\end{align}
This is an underdetermined system ($m$ equations in the $n+m$ unknowns $x$ and $e$), and hence an extra condition is needed:
\begin{align}
 |\hat{e}|=|y-A\hat{x}|=\text{minimal}
\end{align}
which means that the residuals are minimized. This is the case when the residual is orthogonal to the range of $A$. These are the conditions for the least squares method, expressed mathematically:
\begin{align}
1.)\ \hat{e}\bot A\quad 2.)\ \hat{e}^T\hat{e}=\text{min.}
\end{align}
Derive expression for LSQ estimation:
\begin{align}
\hat{e}\bot A \rightarrow \hat{e}^TA=0 \rightarrow A^T\hat{e}=0
\end{align}
Error given by:
\begin{align}
\hat{e} = y-A \hat{x}
\end{align}
From $A^T\hat{e}=0$:
\begin{align}
A^T(y-A\hat{x})=0\\
A^Ty-A^TA\hat{x}=0
\end{align}
This way we arrive at the \emph{normal equation}:
\begin{align}
\boxed{\underbrace{A^TA}_{\text{normal matrix}}\hat x=A^Ty}
\end{align}
This gives the equation for the LSQ estimate:
\begin{align}
\boxed{\hat{x}=(A^TA)^{-1}A^Ty}
\end{align}
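A small worked example with made-up numbers: fit the line $y_i=x_1+x_2t_i$ through the points $(t,y)=(0,0),(1,1),(2,3)$:
\begin{align*}
A&=\begin{bmatrix}1&0\\1&1\\1&2\end{bmatrix},\quad
A^TA=\begin{bmatrix}3&3\\3&5\end{bmatrix},\quad
A^Ty=\begin{bmatrix}4\\7\end{bmatrix}\\
\hat{x}&=\frac{1}{6}\begin{bmatrix}5&-3\\-3&3\end{bmatrix}\begin{bmatrix}4\\7\end{bmatrix}
=\begin{bmatrix}-1/6\\3/2\end{bmatrix}
\end{align*}
Check: $\hat{e}=y-A\hat{x}=\begin{bmatrix}1/6 & -1/3 & 1/6\end{bmatrix}^T$, and indeed $A^T\hat{e}=0$.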
From this one can easily derive the \emph{orthogonal projector} $P_{A^\bot}$:
\begin{align}
\hat{e}	&=y-A\hat{x}=y-A(A^TA)^{-1}A^Ty\\
		&=(I-A(A^TA)^{-1}A^T)y\\
		&=P_{A^\bot}y
\end{align}
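Two useful properties of $P_{A^\bot}$: it is symmetric, and idempotent (projecting twice changes nothing):
\begin{align*}
P_{A^\bot}^2 &= I - 2A(A^TA)^{-1}A^T + A(A^TA)^{-1}\underbrace{A^TA(A^TA)^{-1}}_{=I}A^T\\
&= I - A(A^TA)^{-1}A^T = P_{A^\bot}
\end{align*}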
Check the assumption: is $\hat{e}^T\hat{e}$ really minimal?
\begin{align}
\hat{e}^T\hat{e}	&=(y-A\hat{x})^T(y-A\hat{x})\\
					&=y^Ty-\hat{x}^TA^Ty-y^TA\hat{x}+\hat{x}^TA^TA\hat{x}\\
					&=y^Ty-2y^TA\hat{x}+\underbrace{\hat{x}^TA^TA\hat{x}}_{\text{quadratic form}}\\
\frac{\partial (\hat{e}^T\hat{e})}{\partial \hat{x}} & = -2A^Ty+2A^TA\hat{x}=0
\end{align}
This is again the normal equation
\begin{align}
\boxed{A^TA\hat{x}=A^Ty}
\end{align}
proving that the residuals are indeed minimal.
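Strictly speaking, the zero gradient only identifies a stationary point; it is a minimum because the second derivative is positive semi-definite:
\begin{align*}
\frac{\partial^2 (\hat{e}^T\hat{e})}{\partial \hat{x}\,\partial \hat{x}^T} = 2A^TA, \qquad z^TA^TAz = |Az|^2 \geq 0 \quad \text{for all } z
\end{align*}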
\end{multicols}
\end{document}