\section{Probability Theory}


\subsection*{Problem 1}
First we note that 
\[F_Y(y) = \begin{cases}
            0 & \text{for } y\leq 0 \\
            1 & \text{for } y\geq 1
           \end{cases}
\]

In $[0,1]$, we can say that $F_Y(y)$ is differentiable, strictly positive, and strictly increasing, because it is
the integral of the strictly increasing, continuous, and positive function $F_X(x)$. Furthermore, we can say that
\[ f_Y(y) = \frac{dF_Y}{dy} = 0 \quad \text{for } y\leq 0 \text{ or } y\geq 1,
\] and $f_Y(y)$ is positive and continuous on $[0,1]$.


\subsection*{Problem 2}

Show that the sum of two independent Gaussian random variables $X_{1}$,
$X_{2}$ is again Gaussian.

We can define every Gaussian random variable $X\sim\mathcal{N}(u,\Sigma)$
using a linear transformation on 
\[
Z\sim\mathcal{N}(0,I)
\]
\[
X=\mu+LZ
\]
 where $L$ is the lower-triangular factor given by the Cholesky decomposition of the covariance matrix,
 \[
 \Sigma = LL^{T}.
 \]
 Since $X_{1}$ and $X_{2}$ are independent, they are generated from two \emph{independent} standard Gaussian vectors $Z_{1}$ and $Z_{2}$:
 \[
 X_{1}=\mu_{1}+L_{1}Z_{1}, \qquad X_{2}=\mu_{2}+L_{2}Z_{2}.
 \]
 If we now sum these two quantities we obtain a new random variable $X_{3}$,
 \[
 X_{3}=X_{1}+X_{2}=(\mu_{1}+\mu_{2})+\begin{pmatrix}L_{1} & L_{2}\end{pmatrix}\begin{pmatrix}Z_{1}\\ Z_{2}\end{pmatrix},
 \]
 which is an affine transformation of the standard Gaussian vector $(Z_{1},Z_{2})\sim\mathcal{N}(0,I)$ and is therefore itself Gaussian, with distribution
 \[
 X_{3}\sim\mathcal{N}\bigl(\mu_{1}+\mu_{2},\; L_{1}L_{1}^{T}+L_{2}L_{2}^{T}\bigr)=\mathcal{N}(\mu_{1}+\mu_{2},\,\Sigma_{1}+\Sigma_{2}).
 \]

\subsection*{Problem 3}

It is sufficient to show that the joint probability density function is equal to the product 
of the individual probability densities:
\[
    f_{Z}(z) = f_X(x)f_Y(y).
\]
A direct calculation yields the result: since $\rho(X,Y) = 0$, the covariance matrix and its inverse are diagonal,
\[
    \Sigma = \begin{pmatrix}
               \sigma_x^2 & 0   \\
               0          & \sigma_y^2
             \end{pmatrix}
    \qquad \text{and} \qquad
    \Sigma^{-1} = \begin{pmatrix}
                    \sigma_x^{-2} & 0   \\
                    0             & \sigma_y^{-2}
                  \end{pmatrix}.
\]
Also, $\det\Sigma = \sigma_x^2 \sigma_y^2$. Now, since $Z=(X,Y)$ is bivariate Gaussian, we know the density $f_Z(x,y)$ explicitly:
\begin{align*}
    f_Z(z) &= \frac{1}{2\pi\sqrt{|\Sigma|}} \exp\left(-\frac{1}{2}(z-\mu)^T\Sigma^{-1}(z-\mu)\right)\\
           &= \frac{1}{2\pi\sigma_x \sigma_y} \exp\left(-\frac{1}{2} \begin{pmatrix}x-\mu_x \\ y-\mu_y\end{pmatrix}^T \begin{pmatrix}\sigma_x^{-2} & 0 \\ 0 & \sigma_y^{-2}\end{pmatrix}\begin{pmatrix}x-\mu_x \\ y-\mu_y\end{pmatrix}\right) \\
           &= \frac{1}{2\pi\sigma_x \sigma_y} \exp\left(-\frac{1}{2} \left(\left(\frac{x-\mu_x}{\sigma_x}\right)^2 + \left(\frac{y-\mu_y}{\sigma_y}\right)^2\right)\right) \\
           &= \frac{1}{2\pi\sigma_x \sigma_y} \exp\left(-\frac{1}{2} \left(\frac{x-\mu_x}{\sigma_x}\right)^2 \right)\exp \left( -\frac{1}{2}\left(\frac{y-\mu_y}{\sigma_y}\right)^2\right) \\
           &= f_X(x)f_Y(y).
\end{align*}
Hence, $X$ and $Y$ are independent.
