% Emacs, this is -*-latex-*-

\ex{Mean field variational inference I}
\label{ex:MFVI-I}
Let $\ELBOx(q)$ be the evidence lower bound for the marginal $p(\x)$ of a joint pdf/pmf $p(\x,\y)$,
\begin{equation}
  \ELBOx(q) = \E_{q(\y|\x)} \left[\log \frac{p(\x,\y)}{q(\y|\x)}\right].
  \end{equation}
Mean field variational inference assumes that the variational distribution $q(\y|\x)$ fully factorises, i.e.
\begin{equation}
  q(\y | \x) = \prod_{i=1}^d q_i(y_i | \x),
\end{equation}
when $\y$ is $d$-dimensional. An approach to learning the $q_i$ for
each dimension is to update one at a time while keeping the others fixed. We
here derive the corresponding update equations.

\begin{exenumerate}
\item Show that the evidence lower bound $\ELBOx(q)$ can be written as
\begin{equation}
  \ELBOx(q) = \E_{q_1(y_1|\x)} \E_{q(\y_{\setminus 1}|\x)}\left[ \log p(\x,\y)\right] - \sum_{i=1}^d \E_{q_i(y_i|\x)} \left[\log q_i(y_i|\x)\right]
\end{equation}
where $q(\y_{\setminus 1}|\x) = \prod_{i=2}^d q_i(y_i | \x)$ is the variational distribution without $q_1(y_1|\x)$.

\begin{solution}
  This follows directly from the definition of the ELBO and the assumed factorisation of $q(\y|\x)$. We have
  \begin{align}
    \ELBOx(q) &= \E_{q(\y|\x)} \log p(\x,\y) - \E_{q(\y|\x)} \log q(\y|\x) \\
    & = \E_{ \prod_{i=1}^d q_i(y_i | \x)}  \log p(\x,\y) - \E_{ \prod_{i=1}^d q_i(y_i | \x)} \sum_{i=1}^d \log q_i(y_i|\x) \\
    & = \E_{ \prod_{i=1}^d q_i(y_i | \x)}  \log p(\x,\y) - \sum_{i=1}^d  \E_{q_i(y_i | \x)} \log q_i(y_i|\x) \\
    & = \E_{q_1(y_1|\x)} \E_{ \prod_{i=2}^d q_i(y_i | \x)}  \log p(\x,\y) - \sum_{i=1}^d  \E_{q_i(y_i | \x)} \log q_i(y_i|\x)\\
    & = \E_{q_1(y_1|\x)} \E_{q(\y_{\setminus 1}|\x)}\left[ \log p(\x,\y)\right] - \sum_{i=1}^d \E_{q_i(y_i|\x)} \left[\log q_i(y_i|\x)\right]
  \end{align}
  We have here used the linearity of expectation. In case of continuous random variables, for instance, we have
  \begin{align}
 \E_{ \prod_{i=1}^d q_i(y_i | \x)} \sum_{i=1}^d \log q_i(y_i|\x) & = \int  q_1(y_1 | \x)\cdot \ldots\cdot q_d(y_d|\x)  \sum_{i=1}^d \log q_i(y_i|\x) d y_1\ldots d y_d\\
 & =  \sum_{i=1}^d \int q_1(y_1 | \x)\cdot \ldots \cdot q_d(y_d|\x) \log q_i(y_i|\x) d y_1\ldots d y_d\\
 & =  \sum_{i=1}^d \int q_i(y_i | \x)\log q_i(y_i|\x) d y_i \underbrace{\int \prod_{j\neq i} q_j(y_j|\x) d y_j}_{=1}\\
 & =  \sum_{i=1}^d \E_{q_i(y_i|\x)} \log q_i(y_i|\x)
\end{align}
For discrete random variables, the integral is replaced with a sum and leads to the same result.

\end{solution}

\item Assume that we would like to update $q_1(y_1 |\x)$ and that the
  variational marginals of the other dimensions are kept fixed. Show that
  \begin{equation}
    \argmax_{q_1(y_1|\x)} \ELBOx(q) = \argmin_{q_1(y_1|\x)} \KL(q_1(y_1|\x) || \bar{p}(y_1|\x))
  \end{equation}
  with
\begin{equation}
  \log \bar{p}(y_1|\x) = \E_{q(\y_{\setminus 1}|\x)}\left[ \log p(\x,\y)\right] + \text{const},
\end{equation}
where $\text{const}$ refers to terms not depending on $y_1$. That is,
\begin{equation}
\bar{p}(y_1|\x) = \frac{1}{Z} \exp\left[ \E_{q(\y_{\setminus
      1}|\x)}\left[ \log p(\x,\y)\right] \right],
\end{equation}
where $Z$ is the normalising constant. Note that variables $y_2, \ldots, y_d$
are marginalised out due to the expectation with respect to $q(\y_{\setminus
  1}|\x)$.

  \begin{solution}
    Starting from 
    \begin{equation}
      \ELBOx(q) = \E_{q_1(y_1|\x)} \E_{q(\y_{\setminus 1}|\x)}\left[ \log p(\x,\y)\right] - \sum_{i=1}^d \E_{q_i(y_i|\x)} \left[\log q_i(y_i|\x)\right]
    \end{equation}
  we drop terms that do not depend on $q_1$. We then obtain
  \begin{align}
      J(q_1) & =  \E_{q_1(y_1|\x)} \E_{q(\y_{\setminus 1}|\x)}\left[ \log p(\x,\y)\right] -  \E_{q_1(y_1|\x)} \left[\log q_1(y_1|\x)\right]\\
            & =  \E_{q_1(y_1|\x)} \left[\log \bar{p}(y_1|\x) - \text{const} \right]- \E_{q_1(y_1|\x)} \left[\log q_1(y_1|\x)\right]\\
            & = \E_{q_1(y_1|\x)} \log \bar{p}(y_1|\x) - \E_{q_1(y_1|\x)} \left[\log q_1(y_1|\x)\right] - \text{const}\\
            & = \E_{q_1(y_1|\x)}\left[ \log \frac{\bar{p}(y_1|\x)}{ q_1(y_1|\x)} \right] - \text{const}\\
            & = -\KL( q_1(y_1|\x) || \bar{p}(y_1|\x) ) - \text{const}
    \end{align}
    Hence
    \begin{equation}
      \argmax_{q_1(y_1|\x)} \ELBOx(q) = \argmin_{q_1(y_1|\x)} \KL(q_1(y_1|\x) || \bar{p}(y_1|\x))
    \end{equation}
    
  \end{solution}

\item Conclude that given $q_i(y_i|\x)$, $i=2, \ldots, d$, the optimal $q_1(y_1|\x)$ equals $\bar{p}(y_1|\x)$.

  This then leads to an iterative updating scheme where we cycle
  through the different dimensions, each time updating the corresponding marginal variational distribution according to:
  \begin{align}
    q_i(y_i|\x) &=  \bar{p}(y_i|\x), &  \bar{p}(y_i|\x) &= \frac{1}{Z} \exp\left[ \E_{q(\y_{\setminus i}|\x)}\left[ \log p(\x,\y)\right] \right]
  \end{align}
  where $q(\y_{\setminus i}|\x) = \prod_{j \neq i} q_j(y_j | \x)$ is the product of all marginals without marginal $q_i(y_i|\x)$.

  \begin{solution}
    This follows immediately from the fact that the KL divergence is
    minimised when $q_1(y_1|\x) = \bar{p}(y_1|\x)$. Side-note: The
    iterative update rule can be considered to be coordinate ascent
    optimisation in function space, where each ``coordinate''
    corresponds to a $q_i(y_i|\x)$.
    
  \end{solution}
\end{exenumerate}

% --------------------------------------------------

\ex{Mean field variational inference II}
Assume random variables $y_1, y_2, x$ are generated according to the following process
\begin{align}
  y_1 &\sim \Gauss(y_1; 0, 1) &   y_2 &\sim \Gauss(y_2; 0, 1) \\
  n &\sim \Gauss(n; 0, 1)    &    x & = y_1+y_2+n
\end{align}
where $y_1, y_2, n$ are statistically independent.

\begin{exenumerate}

\item $y_1, y_2, x$ are jointly Gaussian. Determine their mean and their
  covariance matrix.

  \begin{solution}
    
    The expected value of $y_1$ and $y_2$ is zero. By linearity of
    expectation, the expected value of $x$ is
    \begin{equation}
      \E(x) = \E(y_1) + \E(y_2) + \E(n) = 0
    \end{equation}
    The variance of $y_1$ and $y_2$ is 1. Since $y_1, y_2, n$ are statistically independent,
    \begin{equation}
      \Var(x) = \Var(y_1) + \Var(y_2) + \Var(n) = 1 + 1 + 1 = 3.
    \end{equation}
    The covariance between $y_1$ and $x$ is
    \begin{align}
      \text{cov}(y_1, x) &= \E( (y_1-\E(y_1))(x-\E(x))) = \E( y_1 x) \\
      &= \E( y_1(y_1+y_2+n) ) = \E(y_1^2) + \E(y_1 y_2) + \E(y_1 n)\\
      &= 1 +  \E(y_1)\E(y_2) + \E(y_1)\E(n)\\
      &= 1 + 0 + 0
    \end{align}
    where we have used that $y_1$ and $x$ have zero mean and the independence assumptions.

    The covariance between $y_2$ and $x$ is computed in the same way and equals 1 too.

    We thus obtain the covariance matrix $\Sigmab$,
    \begin{equation}
      \Sigmab = \begin{pmatrix}
        1 & 0 & 1\\
        0 & 1 & 1\\
        1 & 1 & 3
      \end{pmatrix}
    \end{equation}
    
  \end{solution}

\item The conditional $p(y_1, y_2 | x)$ is Gaussian with mean $\m$ and covariance $\C$,
  \begin{align}
    \m &= \frac{x}{3} \begin{pmatrix}
      1\\
      1
    \end{pmatrix}
    &
    \C & = \frac{1}{3} \begin{pmatrix}
      2 &-1 \\
     -1 & 2
    \end{pmatrix}
  \end{align}
  Since $x$ is the sum of three random variables that have the
  same distribution, it makes intuitive sense that the mean assigns
  $1/3$ of the observed value of $x$ to $y_1$ and $y_2$. Moreover,
  $y_1$ and $y_2$ are negatively correlated since an increase in $y_1$
  must be compensated with a decrease in $y_2$.

  Let us now approximate the posterior $p(y_1, y_2 | x)$ with mean
  field variational inference. Determine the optimal variational
  distribution using the method and results from \exref{ex:MFVI-I}. You may use that
  \begin{align}
    p(y_1, y_2, x) &= \Gauss\left( (y_1, y_2, x); \zerob, \Sigmab \right) &  \Sigmab &= \begin{pmatrix} 
      1 & 0 & 1\\
      0 & 1 & 1\\
      1 & 1 & 3
    \end{pmatrix}
    &
    \Sigmab^{-1} &= \begin{pmatrix} 
      2 & 1 & -1\\
      1 & 2 & -1\\
      -1 & -1 & 1
    \end{pmatrix}
  \end{align}
  
  \begin{solution}
    The mean field assumption means that the variational distribution is assumed to factorise as
    \begin{equation}
      q(y_1, y_2 | x) = q_1(y_1 | x) q_2(y_2 | x)
    \end{equation}
    From \exref{ex:MFVI-I}, the optimal $q_1(y_1|x)$ and $q_2(y_2|x)$ satisfy
     \begin{align}
       q_1(y_1|x) &=  \bar{p}(y_1|x), &  \bar{p}(y_1|x) &= \frac{1}{Z} \exp\left[ \E_{q_2(y_2|x)}\left[ \log p(y_1, y_2, x)\right] \right]\\
       q_2(y_2|x) &=  \bar{p}(y_2|x), &  \bar{p}(y_2|x) &= \frac{1}{Z} \exp\left[ \E_{q_1(y_1|x)}\left[ \log p(y_1, y_2, x)\right] \right]
     \end{align}
    Note that these are coupled equations: $q_2$ features in the
    equation for $q_1$ via $\bar{p}(y_1|x)$, and $q_1$ features in
    the equation for $q_2$ via $\bar{p}(y_2|x)$. But we have two
    equations for two unknowns, which for the Gaussian joint model
    $p(y_1, y_2, x)$ can be solved in closed form.

    Given the provided equation for $p(y_1, y_2, x)$, we have that
    \begin{align}
      \log p(y_1, y_2, x) & = -\frac{1}{2} \begin{pmatrix}
        y_1\\
        y_2\\
        x
      \end{pmatrix}^\top
      \begin{pmatrix}
        2 & 1 & -1\\
        1 & 2 & -1\\
        -1 & -1 & 1
      \end{pmatrix}
      \begin{pmatrix}
        y_1\\
        y_2\\
        x
      \end{pmatrix} + \text{const}\\
        & = -\frac{1}{2} \left( 2 y_1^2 + 2 y_2^2 + x^2 + 2 y_1 y_2 - 2 y_1 x - 2y_2 x \right) + \text{const}
    \end{align}
    
    Let us start with the equation for $\bar{p}(y_1|x)$. It is easier to work in the logarithmic domain, where we obtain:
    \begin{align}
      \log  \bar{p}(y_1|x) & =  \E_{q_2(y_2|x)}\left[ \log p(y_1, y_2, x)\right] + \text{const}\\
      & = -\frac{1}{2} \E_{q_2(y_2|x)}\left[  2 y_1^2 + 2 y_2^2 + x^2 + 2 y_1 y_2 - 2 y_1 x - 2y_2 x \right] + \text{const}\\
      & = -\frac{1}{2} \left(2 y_1^2 + 2 y_1 \E_{q_2(y_2|x)}[y_2] - 2 y_1 x \right) + \text{const}\\
      & = -\frac{1}{2} \left( 2 y_1^2 +2y_1 m_2 - 2 y_1 x \right) + \text{const}\\
      & = -\frac{1}{2} \left( 2 y_1^2 - 2 y_1 (x-m_2) \right) + \text{const}
      \label{eq:barp1}
    \end{align}
    where we have absorbed all terms not involving $y_1$ into the constant. Moreover, we set $\E_{q_2(y_2|x)}[y_2]=m_2$.

    Note that an arbitrary Gaussian density $\Gauss(y; m, \sigma^2)$
    with mean $m$ and variance $\sigma^2$ can be written in the
    log-domain as
    \begin{align}
      \log \Gauss(y; m, \sigma^2) & = -\frac{1}{2}\frac{(y-m)^2}{\sigma^2} + \text{const}\\
      & = -\frac{1}{2} \left( \frac{y^2}{\sigma^2} -2y \frac{m}{\sigma^2} \right) + \text{const}
      \label{eq:loggauss}
    \end{align}
    Comparison with \eqref{eq:barp1} shows that $\bar{p}(y_1|x)$, and
    hence $q_1(y_1 | x)$, is Gaussian with variance and mean equal to
    \begin{align}
      \sigma_1^2 &= \frac{1}{2} & m_1 & = \frac{1}{2}(x-m_2)
    \end{align}
    Note that we have not made a Gaussianity assumption on $q_1(y_1 |
    x)$. The optimal $q_1(y_1 | x)$ turns out to be Gaussian because
    the model $p(y_1, y_2, x)$ is Gaussian.
    
    The equation for $\bar{p}(y_2|x)$ gives similarly
    \begin{align}
      \log  \bar{p}(y_2|x) & =  \E_{q_1(y_1|x)}\left[ \log p(y_1, y_2, x)\right] + \text{const}\\
      & = -\frac{1}{2} \E_{q_1(y_1|x)}\left[  2 y_1^2 + 2 y_2^2 + x^2 + 2 y_1 y_2 - 2 y_1 x - 2y_2 x \right] + \text{const}\\
      & = -\frac{1}{2} \left(2 y_2^2 + 2 \E_{q_1(y_1|x)}[y_1] y_2 - 2y_2 x \right) + \text{const}\\
      & = -\frac{1}{2} \left(2 y_2^2 + 2 m_1 y_2 - 2 y_2 x \right) + \text{const}\\
      & = -\frac{1}{2} \left(2 y_2^2 - 2 y_2(x-m_1) \right) + \text{const}
      \label{eq:barp2}
    \end{align}
    where we have absorbed all terms not involving $y_2$ into the
    constant. Moreover, we set $\E_{q_1(y_1|x)}[y_1]=m_1$. With
    \eqref{eq:loggauss}, this defines a Gaussian distribution with 
    variance and mean equal to
    \begin{align}
      \sigma_2^2 &= \frac{1}{2} & m_2 & = \frac{1}{2}(x-m_1)
    \end{align}
    Hence the optimal marginal variational distributions $q_1(y_1|x)$ and
    $q_2(y_2|x)$ are both Gaussian with variance equal to $1/2$. Their
    means satisfy
    \begin{align}
      m_1 & = \frac{1}{2}(x-m_2) & m_2  & = \frac{1}{2}(x-m_1)
    \end{align}
    These are two equations for two unknowns. We can solve them as follows   
    \begin{align}
      2 m_1 &= x - m_2 \\
      & = x -\frac{1}{2}(x-m_1)\\
      4 m_1 & = 2 x -x + m_1 \\
      3 m_1 & = x\\
      m_1 & = \frac{1}{3} x
    \end{align}
    Hence
    \begin{equation}
     m_2 = \frac{1}{2} x - \frac{1}{6} x = \frac{2}{6} x = \frac{1}{3} x
    \end{equation}
    In summary, we find
    \begin{align}
      q_1(y_1 | x) & = \Gauss\left(y_1; \frac{x}{3}, \frac{1}{2}\right) &  q_2(y_2 | x) & = \Gauss\left(y_2; \frac{x}{3}, \frac{1}{2}\right)
    \end{align}
    and the optimal variational distribution $q(y_1, y_2|x) =
    q_1(y_1|x) q_2(y_2|x)$ is Gaussian. We have made the mean field
    (independence) assumption but not the Gaussianity
    assumption. Gaussianity of the variational distribution is a
    consequence of the Gaussianity of the model $p(y_1, y_2, x)$.

    Comparison with the true posterior shows that the mean field
    variational distribution $q(y_1, y_2 | x)$ has the same mean but
    ignores the correlation and underestimates the marginal
    variances. The true posterior and the mean field approximation are
    shown in Figure \ref{fig:meanfield}.

    \begin{figure}[h!]
      \centering
      \includegraphics[width=0.75\textwidth]{meanfield}
      \caption{\label{fig:meanfield}In blue: correlated true posterior. In red: mean field approximation.}
    \end{figure}
  \end{solution}


  
\end{exenumerate}
% --------------------------------------------------
\ex{Variational posterior approximation I}

We have seen that maximising the evidence lower bound (ELBO) with
respect to the variational distribution $q$ minimises the
Kullback-Leibler divergence to the true posterior $p$. We here assume
that $q$ and $p$ are probability density functions so that the
Kullback-Leibler divergence between them is defined as
\begin{equation}
  \KL(q || p) = \int q(\x) \log \frac{q(\x)}{p(\x)} \ud \x = \E_q \left[\log \frac{q(\x)}{p(\x)}\right].
\end{equation}


\begin{exenumerate}

\item You can here assume that $\x$ is one-dimensional so that $p$
  and $q$ are univariate densities. Consider the case where $p$ is
  a bimodal density but the variational densities $q$ are
  unimodal. Sketch a figure that shows $p$ and a variational
  distribution $q$ that has been learned by minimising $\text{KL}(q
  || p)$. Explain qualitatively why the sketched $q$ minimises
  $\text{KL}(q || p)$.

  \begin{solution}

  A possible sketch is shown in the figure below.

  \begin{figure}[h!]
    \centering
    \includegraphics[width = 0.5 \textwidth]{KL-asym}
  \end{figure}

  Explanation: We can divide the domain of $p$ and $q$ into the areas
  where $p$ is small (zero) and those where $p$ has significant
  mass. Since the objective features $q$ in the numerator while $p$ is
  in the denominator, an optimal $q$ needs to be zero where $p$ is
  zero. Otherwise, it would incur a large penalty (division by
  zero). Since we take the expectation with respect to $q$, however,
  regions where $p>0$ do not need to be covered by $q$; cutting them
  out does not incur a penalty. Hence, optimal unimodal $q$ only cover
  one peak of the bimodal $p$.
   
  \end{solution}

  
\item Assume that the true posterior $p(\x) = p(x_1, x_2)$ factorises into two Gaussians of mean zero and variances $\sigma_1^2$ and $\sigma_2^2$,
  \begin{equation}
    p(x_1,x_2) = \frac{1}{\sqrt{2\pi \sigma_1^2}} \exp\left[-\frac{x_1^2}{2 \sigma_1^2}\right]\frac{1}{\sqrt{2\pi \sigma_2^2}} \exp\left[-\frac{x_2^2}{2 \sigma_2^2}\right].
  \end{equation}
  Assume further that the variational density $q(x_1,x_2; \lambda^2)$ is parametrised as
  \begin{equation}
    q(x_1,x_2;\lambda^2) = \frac{1}{2\pi \lambda^2} \exp\left[-\frac{x_1^2+x_2^2}{2 \lambda^2}\right]
    \end{equation}
  where $\lambda^2$ is the variational parameter that is learned
  by minimising $ \KL(q || p)$. If $\sigma^2_2$ is much
  larger than $\sigma^2_1$, do you expect $\lambda^2$ to be closer to $\sigma_2^2$
  or to $\sigma_1^2$? Provide an explanation.

  \begin{solution}
    The learned variational parameter will be closer to $\sigma_1^2$
    (the smaller of the two $\sigma_i^2$).
      
    Explanation: First note that the $\sigma_i^2$ are the variances
    along the two different axes, and that $\lambda^2$ is the single
    variance for both $x_1$ and $x_2$. The objective penalises $q$ if
    it is non-zero where $p$ is zero (see above). The variational
    parameter $\lambda^2$ thus will get adjusted during learning so
    that the variance of $q$ is close to the smallest of the two
    $\sigma_i^2$.

  \end{solution}

\end{exenumerate}

% --------------------------------------------------

\ex{Variational posterior approximation II}

We have seen that maximising the evidence lower bound (ELBO) with
respect to the variational distribution minimises the
Kullback-Leibler divergence to the true posterior. We here
investigate the nature of the approximation if the family of
variational distributions does not include the true posterior.

\begin{exenumerate}

\item Assume that the true posterior for $\x = (x_1, x_2)$ is given by
  \begin{equation}
    p(\x) = \normal(x_1 ; \sigma_1^2)\normal(x_2 ; \sigma_2^2) 
  \end{equation}
  and that our variational distribution $q(\x; \lambda^2)$ is
  \begin{equation}
    q(\x; \lambda^2) = \normal(x_1 ; \lambda^2)\normal(x_2 ; \lambda^2),
  \end{equation}
  where $\lambda >0$ is the variational parameter. Provide an
  equation for
  \begin{equation}
    J(\lambda) = \KL(q(\x; \lambda^2) || p(\x)),
  \end{equation}
  where you can omit additive terms that do not depend on
  $\lambda$. 

  \begin{solution}

    We write
    \begin{align}
      \text{KL}(q(\x; \lambda^2) || p(\x)) & = \E_q \left[ \log  \frac{q(\x; \lambda^2)}{p(\x)} \right]\\
      & = \E_q \log q(\x; \lambda^2) - \E_q \log p(\x)\\
      & = \E_q \log  \normal(x_1 ; \lambda^2) + \E_q \log \normal(x_2 ; \lambda^2) \nonumber \\
      & \phantom{=} - \E_q \log \normal(x_1 ; \sigma_1^2) - \E_q \log \normal(x_2 ; \sigma_2^2)
    \end{align}
    
    We further have
    \begin{align}
      \E_q \log  \normal(x_i ; \lambda^2) & = \E_q \log \left[ \frac{1}{\sqrt{2\pi \lambda^2}} \exp \left[-\frac{x_i^2}{2 \lambda^2} \right] \right]\\
      & = \log \left[ \frac{1}{\sqrt{2\pi \lambda^2}}\right] -\E_q \left[\frac{x_i^2}{2 \lambda^2}\right]\\
      & = -\log \lambda - \frac{\lambda^2}{2\lambda^2} + \text{const}\\
      & = -\log \lambda - \frac{1}{2} + \text{const}\\
      & = -\log \lambda + \text{const}
    \end{align}
    where we have used that for zero mean $x_i$, $\E_q [x_i^2] = \Var(x_i) = \lambda^2$. 

    We similarly obtain
    \begin{align}
    \E_q \log \normal(x_i ; \sigma_i^2) & = \E_q \log \left[ \frac{1}{\sqrt{2\pi \sigma_i^2}} \exp \left[-\frac{x_i^2}{2 \sigma_i^2} \right] \right]\\
      & = \log \left[ \frac{1}{\sqrt{2\pi \sigma_i^2}}\right] -\E_q \left[\frac{x_i^2}{2 \sigma_i^2}\right]\\
    & = -\log \sigma_i - \frac{\lambda^2}{2\sigma_i^2} + \text{const}\\
    & = - \frac{\lambda^2}{2\sigma_i^2} + \text{const}
    \end{align}
    
    We thus have
    \begin{align}
      \text{KL}(q(\x; \lambda^2) || p(\x))  & =  -2 \log \lambda + \lambda^2\left(\frac{1}{2\sigma_1^2}+\frac{1}{2\sigma_2^2}\right)+\text{const}
    \end{align}    
    
  \end{solution}


  
\item Determine the value of $\lambda$ that minimises $J(\lambda) =
  \KL(q(\x; \lambda^2) || p(\x))$. Interpret the result and
  relate it to properties of the Kullback-Leibler
  divergence.

  \begin{solution}

    Taking derivatives of $J(\lambda)$ with respect to $\lambda$ gives
    \begin{align}
      \frac{\partial J(\lambda)}{\partial \lambda} & = -\frac{2}{\lambda} + \lambda \left(\frac{1}{\sigma_1^2}+\frac{1}{\sigma_2^2}\right)
      \end{align}
    Setting it zero yields
    \begin{align}
      \frac{1}{\lambda^2} &= \frac{1}{2}\left(\frac{1}{\sigma_1^2}+\frac{1}{\sigma_2^2}\right)
    \end{align}
    so that
    \begin{align}
      \lambda^2 = 2 \frac{\sigma_1^2 \sigma_2^2}{\sigma_1^2+\sigma_2^2}
    \end{align}
    or
    \begin{equation}
    \lambda = \sqrt{2} \sqrt{\frac{\sigma_1^2 \sigma_2^2}{\sigma_1^2+\sigma_2^2}}
    \end{equation}
    This is a minimum because the second derivative of
    $J(\lambda)$
    \begin{equation}
      \frac{\partial^2 J(\lambda)}{\partial \lambda^2} = \frac{2}{\lambda^2} + \left(\frac{1}{\sigma_1^2}+\frac{1}{\sigma_2^2}\right)
    \end{equation}
    is positive for all $\lambda > 0$.
    
    The result has an intuitive explanation: the optimal variance
    $\lambda^2$ is the harmonic mean of the variances $\sigma_i^2$ of
    the true posterior. In other words, the optimal precision
    $1/\lambda^2$ is given by the average of the precisions
    $1/\sigma_i^2$ of the two dimensions.

    If the variances are not equal, e.g.\ if $\sigma_2^2 >
    \sigma_1^2$, we see that the optimal variance of the variational
    distribution strikes a compromise between two types of penalties
    in the KL-divergence: the penalty of having a bad fit because the
    variational distribution along dimension two is too narrow; and
    along dimension one, the penalty for the variational distribution
    to be nonzero when $p$ is small. 

  \end{solution}
  
\end{exenumerate}


% --------------------------------------------------------------- %
\ex{EM algorithm for mixture models}
\label{ex:EM-mixture-models}
Mixture models are statistical models of the form
\begin{equation}
  p(\x; \thetab) = \sum_{k=1}^K \pi_k p_k(\x; \thetab_k)
  \label{eq:mixture-model}
\end{equation}
where each $p_k(\x; \thetab_k)$ is itself a statistical model parameterised by
$\thetab_k$ and the $\pi_k\ge 0$ are mixture weights that sum to one. The
parameters $\thetab$ of the mixture model consist of the parameters $\thetab_k$
of each mixture component and the mixture weights $\pi_k$, i.e.\ $\thetab =
(\thetab_1, \ldots, \thetab_K, \pi_1, \ldots, \pi_K)$. An example is a mixture
of Gaussians where each $p_k(\x; \thetab_k)$ is a Gaussian with parameters given
by the mean vector $\mub_k$ and a covariance matrix $\Sigmab_k$.

The mixture model in \eqref{eq:mixture-model} can be considered to be the
marginal distribution of a latent variable model $p(\x, h; \thetab)$ where $h$
is an unobserved variable that takes on values $1, \ldots, K$ and $p(h=k) =
\pi_k$. Defining $p(\x|h=k; \thetab) = p_k(\x; \thetab_k)$, the latent variable
model corresponding to \eqref{eq:mixture-model} thus is
\begin{equation}
  p(\x, h=k; \thetab) = p(\x|h=k; \thetab)p(h=k) = \pi_k p_k(\x; \thetab_k).
  \label{eq:latent-var-model-long}
\end{equation}
In particular note that marginalising out $h$ gives $p(\x; \thetab)$ in
\eqref{eq:mixture-model}.

\begin{exenumerate}
\item Verify that the latent variable model in \eqref{eq:latent-var-model-long} can be written as
  \begin{equation}
    p(\x, h; \thetab) = \prod_{k=1}^K \left[  \pi_k  p_k(\x; \thetab_k) \right]^{\ind(h=k)}
    \label{eq:latent-var-model}
  \end{equation}
  where $h$ takes values in $1, \ldots, K$.
  
  \begin{solution}
    Since $\ind(h=k)$ is one if $h=k$ and zero otherwise, we have
    \begin{equation}
      p(\x, h=j; \thetab) =  \prod_{k=1}^K \left[  \pi_k  p_k(\x; \thetab_k) \right]^{\ind(j=k)} = \pi_j p_j(\x ; \thetab_j)
    \end{equation}
    for any $j \in \{1, \ldots, K\}$, which matches
    \eqref{eq:latent-var-model-long}.
  \end{solution}

\item Since the mixture model in \eqref{eq:mixture-model} can be seen as the
  marginal of a latent-variable model, we can use the expectation maximisation
  (EM) algorithm to estimate the parameters $\thetab$.

  For a general model $p(\data,\h; \thetab)$ where $\data$ are the observed data
  and $\h$ the corresponding unobserved variables, the EM algorithm iterates between
  computing the expected complete-data log-likelihood $J^l(\thetab)$ and maximising
  it with respect to $\thetab$: 
  \begin{align}
    \textbf{E-step at iteration $l$:}\quad J^l(\thetab) &= \E_{p(\h | \data; \thetab^l)} [ \log p(\data,\h; \thetab)]\\
    \textbf{M-step at iteration $l$:}\quad \thetab^{l+1} &= \argmax_{\thetab} J^l(\thetab) 
  \end{align}
  Here $\thetab^l$ is the value of $\thetab$ in the $l$-th iteration. When
  solving the optimisation problem, we also need to take into account
  constraints on the parameters, e.g.\ that the $\pi_k$ correspond to a pmf.
  
  Assume that the data $\mathcal{D}$ consists of $n$ iid data points $\x_i$,
  that each $\x_i$ has associated with it a scalar unobserved variable $h_i$,
  and that the tuples $(\x_i, h_i)$ are all iid. What is $J^l(\thetab)$ under
  these additional assumptions?
  
  \begin{solution}
    Since the $(\x_i, h_i)$ are iid, we have that $p(\data,\h; \thetab) =
    \prod_{i=1}^n p(\x_i, h_i; \thetab)$. Hence
    \begin{align}
      J^l(\thetab) &= \E_{p(\h | \data; \thetab^l)} [ \log p(\data,\h; \thetab)]\\
                  & = \E_{p(\h | \data; \thetab^l)}\left[ \sum_{i=1}^n \log  p(\x_i, h_i; \thetab)\right]\\
                  & = \sum_{i=1}^n \E_{p(\h | \data; \thetab^l)}[ \log  p(\x_i, h_i; \thetab)]\\
                  & =  \sum_{i=1}^n \E_{p(h_i | \data; \thetab^l)}[ \log  p(\x_i, h_i; \thetab)]\\
                  & =  \sum_{i=1}^n \E_{p(h_i | \x_i; \thetab^l)}[ \log  p(\x_i, h_i; \thetab)] \label{eq:Jl-intermediate}
    \end{align}
    where in the second last step, we have used that each $\log p(\x_i, h_i;
    \thetab)$ only involves one latent variable $h_i$ so that we only need to
    take the expectation over $p(h_i | \data; \thetab^l)$, and in the last step,
    we have used that $h_i \independent \x_j$, for $j\neq i$.
   
  \end{solution}

\item Show that for the latent variable model in \eqref{eq:latent-var-model},
  $J^l(\thetab)$ equals
  \begin{align}
    J^l(\thetab) & = \sum_{i=1}^n  \sum_{k=1}^K w_{ik}^l  \log[  \pi_k  p_k(\x_i; \thetab_k) ],\label{eq:Jl-expression}\\
     w_{ik}^l & =  \frac{ \pi^l_k p_k(\x_i; \thetab^l_k)}{ \sum_{j=1}^K \pi^l_j p_j(\x_i; \thetab^l_j)}\label{eq:w-def}
  \end{align}
  Note that the $w_{ik}^l$ are defined in terms of the parameters $\pi_k^l$ and
  $\thetab_k^l$ from iteration $l$. They are equal to the conditional
  probabilities $p(h=k|\x_i; \thetab^l)$, i.e.\ the probability that $\x_i$
  has been sampled from component $p_k(\x_i; \thetab^l_k)$. 
  
  \begin{solution}
    We consider a single term $ \E_{p(h| \x; \thetab^l)}[ \log p(\x, h;
    \thetab)]$ in \eqref{eq:Jl-intermediate}.

    Given the form of the model in \eqref{eq:latent-var-model}, we have that
    \begin{align}
      \log p(\x, h; \thetab) = \sum_{k=1}^K \ind(h=k) \log[  \pi_k  p_k(\x; \thetab_k) ]
    \end{align}
    and hence
    \begin{align}
      \E_{p(h| \x; \thetab^l)}[ \log p(\x, h; \thetab)] &=  \E_{p(h| \x; \thetab^l)}\left[ \sum_{k=1}^K \ind(h=k) \log[  \pi_k  p_k(\x; \thetab_k) ] \right]\\
                                                        &=  \sum_{k=1}^K  \E_{p(h| \x; \thetab^l)}\left[ \ind(h=k) \right] \log[  \pi_k  p_k(\x; \thetab_k) ]\\
                                                        & = \sum_{k=1}^K p(h=k|\x; \thetab^l)  \log[  \pi_k  p_k(\x; \thetab_k) ]
    \end{align}
    where we have used that the expectation over an indicator event equals the
    probability for the event to happen, i.e.\ $\E_{p(h| \x; \thetab^l)}\left[
      \ind(h=k) \right] =  p(h=k|\x; \thetab^l)$.
    
    The probability  $p(h=k|\x; \thetab^l)$ can be determined via the product
    (Bayes') rule and Equations \eqref{eq:latent-var-model-long} and \eqref{eq:mixture-model}
    \begin{align}
      p(h=k|\x; \thetab^l) &= \frac{p(\x, h=k; \thetab^l)}{p(\x; \thetab^l)}\\
                           & = \frac{ \pi^l_k p_k(\x; \thetab^l_k)}{ \sum_{j=1}^K \pi^l_j p_j(\x; \thetab^l_j)}
                             \label{eq:posterior-def}
    \end{align}
    Note that the superscript $^l$ indicates that the $\pi_k^l$ are the mixture
    weights and the $\thetab^l_k$ the model parameters from iteration $l$.

    The objective $J^l(\thetab)$ sums over $n$ terms $ \E_{p(h| \x_i; \thetab^l)}[ \log p(\x_i, h;
    \thetab)]$. Let us denote $p(h=k|\x_i; \thetab^l)$ from \eqref{eq:posterior-def} by $w_{ik}^l$ so that
    \begin{equation}
      \E_{p(h| \x_i; \thetab^l)}[ \log p(\x_i, h; \thetab)] =  \sum_{k=1}^K w_{ik}^l  \log[  \pi_k  p_k(\x_i; \thetab_k) ]
    \end{equation}
    and
    \begin{align}
      J^l(\thetab) & = \sum_{i=1}^n  \sum_{k=1}^K w_{ik}^l  \log[  \pi_k  p_k(\x_i; \thetab_k) ].
    \end{align}
    The objective $J^l(\thetab)$ takes the form of a weighted log-likelihood. In
    more detail, since $\sum_k w_{ik}^l=1$ for all data points $\x_i$ (and
    $w_{ik}^l\ge 0 $), $ \sum_{k=1}^K w_{ik}^l \log[ \pi_k p_k(\x_i; \thetab_k)
    ]$ is a convex combination. This means that the different components of the
    mixture model compete with each other: larger weights for some
    components mean smaller weights for others. In the extreme case, some
    components may contribute in a negligible way to the $i$-th term of the
    log-likelihood.

    The weights $w_{ik}^l$ are sometimes, in particular for mixture of
    Gaussians, called ``soft-assignments'' because they specify to which extent
    a data point $\x_i$ ``belongs'' to a mixture component $p_k$.
    Alternatively, we can interpret the $w_{ik}^l$ to be the
    ``responsibilities'' of each mixture component $p_k$ for a datapoint $\x_i$.

    In some cases, e.g.\ for computational reasons, we may determine which of
    the $K$ weights $w_{i1}^l, \ldots, w_{iK}^l$ is the largest and then set it
    to one while setting the other weights to zero. This corresponds to
    ``hard-assignments'' (and ``hard EM'') where a data point $\x_i$ is
    exclusively assigned to a single mixture component $p_k$. 
  \end{solution}

\item Assume that the different mixture components $p_k(\x; \thetab_k), k=1,
  \ldots, K$ do not share any parameters. Show that the updated parameter values
  $\thetab_k^{l+1}$ are given by weighted maximum likelihood estimates.

  \begin{solution}
   We interchange the order of the summations in \eqref{eq:Jl-expression} so
   that
   \begin{align}
     J^l(\thetab) & = \sum_{k=1}^K \sum_{i=1}^n  w_{ik}^l  \log[  \pi_k  p_k(\x_i; \thetab_k) ]\\
                  & =  \sum_{k=1}^K\sum_{i=1}^n  w_{ik}^l\log \pi_k + \sum_{k=1}^K\underbrace{\sum_{i=1}^n  w_{ik}^l \log p_k(\x_i; \thetab_k)}_{\ell_k^l(\thetab_k)}
                    \label{eq:Jl-interchanged}
   \end{align}
   When we update the parameters $\thetab_k$ of the mixture components, the
   first term is a constant. The second term is a sum over weighted
   log-likelihoods $\ell_k^l(\thetab_k)$, one for each mixture component. If the
   mixture components do not share parameters, we thus have
   \begin{equation}
     \thetab_k^{l+1} =  \argmax_{\thetab_k}  J^l(\thetab) = \argmax_{\thetab_k} \ell_k^l(\thetab_k)
   \end{equation}
   This means that we can compute $\thetab_k^{l+1}$ as if we performed maximum
   likelihood estimation for the model $p_k(\x; \thetab_k)$, except that
   the data points $\x_i$ are weighted by the $w_{ik}^l$.
  \end{solution}

\item Show that maximising $J^l(\thetab)$ with respect to the mixture weights
  $\pi_k$ gives the update rule
  \begin{align}
    \pi_k^{l+1} &=  \frac{1}{n}\sum_{i=1}^n w_{ik}^l
  \end{align}
  \begin{solution}
    We start with \eqref{eq:Jl-expression} and drop additive terms that do not
    depend on the $\pi_k$. Since
    \begin{align}
      J^l(\thetab) & =  \sum_{i=1}^n  \sum_{k=1}^K w_{ik}^l  \log \pi_k + \text{terms not depending on the $\pi_k$}
    \end{align}
    we can focus on the objective
    \begin{align}
      J^l_\pi(\pi_1, \ldots, \pi_K) & = \sum_{i=1}^n  \sum_{k=1}^K w_{ik}^l  \log \pi_k\\ 
      &= \sum_{k=1}^K \underbrace{\left( \sum_{i=1}^n w_{ik}^l \right)}_{\omega_k^l}  \log \pi_k \\
                                        & =  \sum_{k=1}^K \omega_k^l \log \pi_k.
    \end{align}
    Taking into account that the $\pi_k = p(h=k)$ define a pmf, the optimisation problem to solve is
    \begin{align}
      \text{maximise} \quad&  \sum_{k=1}^K \omega_k^l \log \pi_k\\
      \text{subject to}\quad &  \pi_k \ge 0 \\
                     &  \sum_{k=1}^K \pi_k = 1 
    \end{align}
    The constrained optimisation problem could be solved via Lagrange
    multipliers. But we here take another approach and solve the optimisation
    problem by phrasing it in terms of a KL-divergence minimisation problem.

    First, note that the $\pi_k$ that maximise $J^l_\pi(\pi_1, \ldots,
    \pi_K)$ will also maximise the re-scaled objective
    \begin{align}
      \frac{1}{\sum_{k=1}^K \omega_k^l}  J_\pi^l(\pi_1, \ldots, \pi_K) & =  \frac{1}{\sum_{k=1}^K \omega_k^l}\sum_{k=1}^K \omega_k^l \log \pi_k\\
                                                                          &=   \sum_{k=1}^K q_k^l \log \pi_k
    \end{align}
    where we introduced
    \begin{equation}
      q_k^l = \frac{\omega_k^l}{\sum_{k=1}^K \omega_{k}^l}.
    \end{equation}
    The $q_k^l$ are non-negative and sum to one. Hence, we can consider them to
    define a pmf.

    Second, note that the $\pi_k$ that maximise $J_\pi^l(\pi_1, \ldots,
    \pi_K)$ will also maximise
    \begin{align}
      \sum_{k=1}^K q_k^l \log \pi_k - \sum_{k=1}^K q_k^l \log q_k^l & = \sum_{k=1}^K q_k^l\log \frac{\pi_k}{q_k^l}\\
                                                                    & = -\sum_{k=1}^K q_k^l \log \frac{q_k^l}{\pi_k}\\
                                                                    & = -\KL(q^l, \pi)
    \end{align}
    since adding constants does not change the solution. Hence, the
    optimal $\pi_k$ are given by the pmf $\pi$ that minimises the KL-divergence
    $\KL(q^l, \pi)$. This means that the optimal $\pi_k$ are
    \begin{align}
      \pi_k & = q_k^l= \frac{\omega_k^l}{\sum_{k=1}^K \omega_{k}^l} = \frac{\sum_{i=1}^n w_{ik}^l}{\sum_{k=1}^K \sum_{i=1}^n w_{ik}^l }.
    \end{align}
    The denominator can be simplified by noting that, with \eqref{eq:w-def},
    $\sum_{k=1}^K w_{ik}^l=1$ so that
    \begin{align}
      \sum_{k=1}^K \sum_{i=1}^n w_{ik}^l = \sum_{i=1}^n \sum_{k=1}^K w_{ik}^l = n
    \end{align}   
    The requested update rule thus is
    \begin{align}
      \pi_k^{l+1} &=  \frac{1}{n}\sum_{i=1}^n w_{ik}^l
    \end{align}
    The update rule does not depend directly on the statistical model $p_k(\x;
    \thetab_k)$ that we may choose for the mixture components. Their influence
    occurs indirectly via the $w_{ik}^l$.
  \end{solution}

\item \label{ex:EM-mixture-models-summary} Summarise the EM-algorithm to learn the parameters $\thetab$ of the
  mixture model in \eqref{eq:mixture-model} from iid data $\x_1, \ldots, \x_n$.

  \begin{solution}
    We collect and summarise the results from the previous questions:
    \begin{itemize}
    \item \textbf{E-step at iteration l:} Compute the posterior probabilities (soft assignments)
      \begin{align}
        w_{ik}^l & =  \frac{ \pi^l_k p_k(\x_i; \thetab^l_k)}{ \sum_{k=1}^K \pi^l_k p_k(\x_i; \thetab^l_k)} 
      \end{align}
      for all data points $\x_i$ and mixture components $k$. Then formulate
      the objective function $J^l(\thetab)$
      \begin{align}
        J^l(\thetab) & = \sum_{i=1}^n  \sum_{k=1}^K w_{ik}^l  \log[  \pi_k  p_k(\x_i; \thetab_k) ]
      \end{align}
    \item  \textbf{M-step at iteration l:} Compute the new mixture weights
      \begin{align}
        \pi_k^{l+1} &=  \frac{1}{n}\sum_{i=1}^n w_{ik}^l
      \end{align}
      To compute the new mixture parameters $\thetab_k^{l+1}$, maximise
      $J^l(\thetab)$ if some parameters are shared or tied. If the $p_k(\x;
      \thetab_k)$ do not share parameters, the new parameters $\thetab_k^{l+1}$
      are obtained by maximising a weighted log-likelihood for each mixture
      component separately:
      \begin{align}
         \thetab_k^{l+1} &= \argmax_{\thetab_k} \sum_{i=1}^n  w_{ik}^l \log p_k(\x_i; \thetab_k)
      \end{align}
      for $k=1, \ldots, K$.
    \end{itemize}
  \end{solution}
    
\end{exenumerate}

 
% --------------------------------------------------------------- %

\ex{EM algorithm for mixture of Gaussians}
\label{ex:EM-MoG}

We here use the results from \exref{ex:EM-mixture-models} to derive the EM
update rules for a mixture of Gaussians. This is a mixture model where each
mixture component is a Gaussian distribution, i.e.
\begin{align}
  p(\x; \thetab) = \sum_{k=1}^K \pi_k \normal(\x; \mub_k, \Sigmab_k).
\end{align}
We consider the case where each $\mub_k$ and $\Sigmab_k$ can be individually
changed (no tying of parameters). The overall parameters of the model are given
by the $\mub_k, \Sigmab_k$ and the mixture weights $\pi_k\ge 0$, $k=1, \ldots,
K$. As in the case of general mixture models, the mixture weights sum to one.

\begin{exenumerate}

\item Determine the maximum likelihood estimates for a multivariate Gaussian
  $\normal(\x; \mub, \Sigmab)$ for iid data $\data = (\x_1, \ldots, \x_n)$ when
  each data point $\x_i$ has a weight $w_i$. The weights are non-negative but do
  not necessarily sum to one.

  \begin{solution}
 The weighted log-likelihood is
    \begin{align}
      \ell(\mub, \Sigmab) & = \sum_{i=1}^n w_i \log \normal(\x_i; \mub, \Sigmab)\\
                          & =  \sum_{i=1}^n w_i \log |\det 2 \pi \Sigmab|^{-1/2} - \frac{1}{2} \sum_{i=1}^n w_i (\x_i-\mub)^\top\Sigmab^{-1}(\x_i-\mub)
    \end{align}
    Introducing the normalised weights $W_i = w_i/ \sum_{i=1}^n w_i$, we have
    \begin{align}
      \frac{1}{ \sum_{i=1}^n w_i} \ell(\mub, \Sigmab)  &=  \log |\det 2 \pi \Sigmab|^{-1/2} - \frac{1}{2} \sum_{i=1}^n W_i (\x_i-\mub)^\top\Sigmab^{-1}(\x_i-\mub)
    \end{align}
    Let us write out the quadratic term
    \begin{align} 
            (\x_i-\mub)^T\Sigmab^{-1}(\x_i-\mub) &= \x_i^\top\Sigmab^{-1}\x_i- 2\x_i^\top \Sigmab^{-1} \mub + \mub^\top \Sigmab^{-1} \mub
    \end{align}
    Hence
    \begin{align}
      \sum_{i=1}^n W_i (\x_i-\mub)^T\Sigmab^{-1}(\x_i-\mub) &= \sum_{i=1}^n W_i \x_i^\top\Sigmab^{-1}\x_i -  2 \sum_{i=1}^n W_i\x_i^\top \Sigmab^{-1} \mub +
                                                             \underbrace{\sum_{i=1}^n W_i}_{=1}    \mub^\top \Sigmab^{-1} \mub  \\
                                                            &= \tr\left[ \left( \sum_{i=1}^n W_i \x_i \x_i^\top \right) \Sigmab^{-1} \right]- 2 \left( \sum_{i=1}^n W_i\x_i  \right)^\top \Sigmab^{-1} \mub +  \mub^\top \Sigmab^{-1} \mub \\
            &= \tr \left( \R \Sigmab^{-1}\right) -2 \b^\top \Sigmab^{-1}\mub +  \mub^\top\Sigmab^{-1} \mub
    \end{align}
    where $\R = \sum_{i=1}^n W_i \x_i \x_i^\top $ and $\b =  \sum_{i=1}^n
    W_i\x_i$. Hence
     \begin{align}
             \frac{1}{ \sum_{i=1}^n w_i} \ell(\mub, \Sigmab)  &=  \log |\det 2 \pi \Sigmab|^{-1/2} - \frac{1}{2} \tr \left( \R \Sigmab^{-1}\right) + \b^\top \Sigmab^{-1}\mub - \frac{1}{2}  \mub^\top\Sigmab^{-1} \mub
     \end{align}
     This has exactly the same form as the unweighted likelihood function, just the
     sufficient statistics $\R$ and $\b$ are computed using the weights. Hence,
     the maximum likelihood estimates, when expressed in terms of $\R$ and $\b$
     remain the same as in the unweighted case:
     \begin{align}
       \hat{\mub} & = \b  =  \sum_{i=1}^n W_i\x_i\\
       \hat{\Sigmab} & = \R - \b\b^\top=  \sum_{i=1}^n W_i \x_i \x_i^\top - \b\b^\top
     \end{align}
     Moreover, since
     \begin{align}
       \sum_{i=1}^n W_i (\x_i-\b)(\x_i-\b)^\top  & =  \sum_{i=1}^n W_i \x_i \x_i^\top - \underbrace{\sum_{i=1}^n W_i \x_i}_{\b} \b^\top - \b \underbrace{\sum_{i=1}^n W_i \x_i^\top}_{\b^\top} + \b\b^\top\\
                                                 &=\R - \b \b^\top - \b \b^\top + \b\b^\top\\
                                                 & =  \R - \b\b^\top
     \end{align}
     we find that the weighted maximum likelihood estimates are the weighted
     average and weighted covariance matrix:
     \begin{align}
       \hat{\mub} & =   \sum_{i=1}^n W_i\x_i&  \hat{\Sigmab} &=    \sum_{i=1}^n W_i (\x_i-\hat{\mub})(\x_i-\hat{\mub})^\top & W_i&= \frac{w_i}{\sum_{i=1}^n w_i}
     \end{align}
  \end{solution}

  \item Use the results from  \exref{ex:EM-mixture-models} to derive the EM update
    rules for the parameters of the Gaussian mixture model.

    \begin{solution}
      From the solution to \exref{ex:EM-mixture-models}\ref{ex:EM-mixture-models-summary} and the derived
      weighted MLE solutions, we find:
      \begin{itemize}
      \item \textbf{E-step at iteration l:} Compute the posterior probabilities (soft assignments)
        \begin{align}
          w_{ik}^l & =  \frac{ \pi^l_k \normal(\x_i; \mub^l_k, \Sigmab_k^l)}{ \sum_{k=1}^K\pi^l_k \normal(\x_i; \mub^l_k, \Sigmab_k^l) } 
      \end{align}
      for all data points $\x_i$ and mixture components $k$.
    \item  \textbf{M-step at iteration l:}
      \begin{itemize}
      \item Determine the weighted MLEs
        \begin{align}
          \mub_k^{l+1} & =   \sum_{i=1}^n W^l_{ik}\x_i&  \Sigmab_k^{l+1} &=    \sum_{i=1}^n W^l_{ik} (\x_i- \mub_k^{l+1})(\x_i- \mub_k^{l+1})^\top
        \end{align}
        where $W^l_{ik} = w_{ik}^l/(\sum_{i=1}^n w_{ik}^l)$.
      \item Compute the new mixture weights
      \begin{align}
        \pi_k^{l+1} &=  \frac{1}{n}\sum_{i=1}^n w_{ik}^l
      \end{align}
    \end{itemize}
  \end{itemize}
  
      
    \end{solution}
      
\end{exenumerate}
 
