%!TEX program = xelatex
\documentclass[lang=en,11pt,a4paper,citestyle=authoryear]{elegantpaper}

% 标题
\title{Homework01 - Optimization Algorithm}
\author{3200102452 \\ WellsGuan}
\date{\today}


% 本文档命令
\usepackage{array,url}
\usepackage{subcaption} % 'subfigure' is deprecated; subcaption is its modern replacement
\usepackage[lined,boxed,commentsnumbered]{algorithm2e}
\newcommand{\ccr}[1]{\makecell{{\color{#1}\rule{1cm}{1cm}}}}
\newcommand{\code}[1]{\lstinline{#1}}


% 文档区
\begin{document}

% 标题
\maketitle

%摘要
\begin{abstract}
This project was established for the assignments of Optimization Algorithm, taught by Prof.~Xu Xiang, Zhejiang University, in Spring--Summer 2023. The related source code has been published in this \href{https://gitee.com/wellsguan/optimization_-algorithm}{repository}.
\end{abstract}

 \subsection*{Problem.1} Implement the CG algorithm to solve linear systems in which $A$ is the Hilbert matrix, whose elements are $A(i,j)= \dfrac{1}{i+j-1}$. Set the right-hand side to $b = (1,1,\cdots,1)^T$ and the initial point to $x_0 = 0$. Try dimensions $n=5,8,12,20$ and show the performance of the residual with respect to the number of iterations needed to reduce the residual below $10^{-6}$.
 \vspace{0.5em} \\
 \textbf{Report.} \par 
 We implement the algorithm and apply it in the conditions above, and we get the outputs:
 \begin{lstlisting}
 Iteration times: 6 (CG Method)
Ans:
[[    4.99999961]
 [ -119.99999982]
 [  630.00000027]
 [-1119.99999972]
 [  630.00000028]]
with error: 1.337341210400759e-07
Iteration times: 19 (CG Method)
Ans:
[[-8.00001734e+00]
 [ 5.04000402e+02]
 [-7.56000218e+03]
 [ 4.62000045e+04]
 [-1.38600005e+05]
 [ 2.16216005e+05]
 [-1.68168006e+05]
 [ 5.14800030e+04]]
with error: 8.312144704502817e-09
Iteration times: 39 (CG Method)
Ans:
[[-9.60900751e+00]
 [ 8.15403097e+02]
 [-1.64966572e+04]
 [ 1.35510865e+05]
 [-5.36482610e+05]
 [ 1.02540116e+06]
 [-6.42579031e+05]
 [-6.57591923e+05]
 [ 8.04245014e+05]
 [ 6.63073393e+05]
 [-1.24128150e+06]
 [ 4.65507315e+05]]
with error: 6.571513618295825e-07
Iteration times: 75 (CG Method)
Ans:
[[-1.09826172e+01]
 [ 1.05120304e+03]
 [-2.39585332e+04]
 [ 2.20432146e+05]
 [-9.65352188e+05]
 [ 1.99010177e+06]
 [-1.25270237e+06]
 [-1.34346975e+06]
 [ 8.83233293e+05]
 [ 1.68797243e+06]
 [ 3.88217654e+05]
 [-1.30553808e+06]
 [-1.71055639e+06]
 [-5.28254731e+05]
 [ 1.20868837e+06]
 [ 2.00290061e+06]
 [ 9.44604345e+05]
 [-1.43405356e+06]
 [-2.65095948e+06]
 [ 1.88785352e+06]]
with error: 7.256771744990568e-07
 \end{lstlisting}
 where $\mathrm{error} = \| A\,x_{\mathrm{ans}}-b\|_2$. Therefore, the implementation is correct. Moreover, the number of iterations increases with the problem size, which coincides with our expectation.

 \subsection*{Problem.2} Derive the preconditioned CG algorithm by applying the standard CG method in the variables $\hat{x}$ and transforming back into the original variables $x$ to obtain the expression of the preconditioner $M$.
 \vspace{0.5em} \\
 \textbf{Report.} \par 
 We first consider the original CG method for $\hat{x}_0$:\\
\begin{algorithm}[H]
  \SetAlgoLined
  \KwData{$\hat{x_0}$}
  Set $\hat{r_0}\leftarrow \hat{A}\hat{x_0}-\hat{b}, \hat{p_0}\leftarrow -\hat{r_0}, k\leftarrow 0$ \;
  \While{$\hat{r}_k\neq 0$}{
    $\hat{\alpha_k}\leftarrow -\tfrac{\hat{r_k}^T\hat{p_k}}{\hat{p_k}^T\hat{A}\hat{p_k}}$ \;
    $\hat{x_{k+1}}\leftarrow \hat{x_k}+\hat{\alpha_k} \hat{p_k}$ \;
    $\hat{r_{k+1}}\leftarrow \hat{r_k}+\hat{\alpha_k}\hat{A}\hat{p_k}$ \;
    $\hat{\beta_{k+1}}\leftarrow \tfrac{\hat{r_{k+1}}^T\hat{r_{k+1}}}{\hat{r_k}^T\hat{r_k}}$ \;
    $\hat{p_{k+1}}\leftarrow -\hat{r_{k+1}}+\hat{\beta_{k+1}}\hat{p_k}$ \;
   }
\end{algorithm}
where 
\[\hat{A} = C^{-T}AC^{-1}\]
hence
\[\hat{r}_0 = C^{-T}Ax_0-C^{-T}b = C^{-T}r_0\]
and we know for preconditioned CG method:\\
\begin{algorithm}[H]
  \SetAlgoLined
  \KwData{$x_0,M$}
  Set $r_0\leftarrow Ax_0-b$\;
  Solve $My_0 =r_0,{p_0}\leftarrow -{y_0}, k\leftarrow 0$ \;
  \While{$r_k\neq 0$}{
    ${\alpha_k}\leftarrow \tfrac{{r_k}^T{y_k}}{{p_k}^T{A}{p_k}}$ \;
    ${x_{k+1}}\leftarrow {x_k}+{\alpha_k} {p_k}$ \;
    ${r_{k+1}}\leftarrow {r_k}+{\alpha_k}{A}{p_k}$ \;
    Solve $My_{k+1} = r_{k+1}$ \;
    ${\beta_{k+1}}\leftarrow \tfrac{{r_{k+1}}^T{y_{k+1}}}{{r_k}^T{y_k}}$ \;
    $p_{k+1}\leftarrow -{y_{k+1}}+{\beta_{k+1}}{p_k}$ \;
   }
\end{algorithm}
we have $\hat{x}_k = Cx_k$ for each $k$. For $k=1$, since $\hat{p}_0=-\hat{r}_0$, we have \[\hat{x}_1 = \hat{x}_0 + \hat{\alpha}_0\hat{p}_0 =C\Bigl(x_0 - \dfrac{\hat{r}_0^T\hat{r}_0}{\hat{r}_0^T\hat{A}\hat{r}_0}C^{-1}\hat{r}_0\Bigr)=C\Bigl(x_0  - \dfrac{r_0^TC^{-1}C^{-T}r_0}{r_0^TC^{-1}C^{-T}AC^{-1}C^{-T}r_0}C^{-1}C^{-T}r_0\Bigr)\]
and 
\[x_1 = x_0 - \dfrac{r_0^Ty_0}{y_0^TAy_0}y_0\]
It is easy to show that $C^TCy_0 = r_0$ will meet our requirement. For $k\geq 2$, if we have $\hat{x}_{k+1} = Cx_{k+1}, \hat{r}_{k+1} = C^{-T}r_{k+1}, \hat{p}_{k} = Cp_{k}$, we want to show that if $M = C^TC$, then the above equations will hold for all $k$. We have
\[p_{k+1} = -y_{k+1}+\dfrac{r_{k+1}^Ty_{k+1}}{r_k^Ty_k}p_k,\quad \hat{p}_{k+1} = -\hat{r}_{k+1}+\dfrac{\hat{r}_{k+1}^T\hat{r}_{k+1}}{\hat{r}_{k}^T\hat{r}_{k}}\hat{p}_k\]
then
\[Cp_{k+1} = -C^{-T}r_{k+1} + \dfrac{r_{k+1}^TC^{-1}C^{-T}r_{k+1}}{r_{k}^TC^{-1}C^{-T}r_k}\hat{p}_k\]
hence
\[\hat{p}_{k+1} = -C^{-T}r_{k+1} + \dfrac{r_{k+1}^TC^{-1}C^{-T}r_{k+1}}{r_{k}^TC^{-1}C^{-T}r_{k}}\hat{p}_k = Cp_{k+1}\]
and
\[\alpha_{k+1} = \dfrac{r_{k+1}^Ty_{k+1}}{p_{k+1}^TAp_{k+1}} = \dfrac{\hat{r}_{k+1}^T\hat{r}_{k+1}}{\hat{p}_{k+1}^TC^{-T}AC^{-1}\hat{p}_{k+1}} = \hat{\alpha}_{k+1}\]
so obviously $\hat{x}_{k+1} = Cx_{k+1}$, and hence $\hat{r}_{k+1} = C^{-T}r_{k+1}$; now we use a numerical experiment to verify this conclusion. Here we set a $C$ to solve Problem~1 and calculate $Cx-x'$ for the two algorithms, and we get the following output:
\begin{lstlisting}
0.0
8.881784197001252e-15
1.686860068162545e-11
2.1277160849242683e-09
1.3268506782410634e-06
\end{lstlisting}
which coincides with our expectation. Therefore, the conclusion above is verified.

 \subsection*{Problem.3} Try to prove that when $\phi = \phi_k^c = \dfrac{1}{1-\mu_k}$ where $\mu_k= \dfrac{(s_k^TB_ks_k)(y_k^TH_ky_k)}{(s_k^Ty_k)^2}$, the Broyden class
 \[B_{k+1}=B_k-\dfrac{B_ks_ks_k^TB_k}{s_k^TB_ks_k}+\dfrac{y_ky_k^T}{y_k^Ts_k}+\phi_k(s_k^TB_ks_k)v_kv_k^T\]where\[v_k=(\dfrac{y_k}{y_k^Ts_k}-\dfrac{B_ks_k}{s^T_kB_ks_k})\]
 becomes singular.

 \vspace{0.5em} 
 \textbf{Proof.}\par
 We know 
 \[B_{k+1} = B_{k+1}^{(BFGS)}(I+\phi_k(s_k^TB_ks_k)H_{k+1}^{(BFGS)}v_kv_k^T)\]where\[H_{k+1}^{(BFGS)}=(I-\dfrac{s_ky_k^T}{y_k^Ts_k})H_k(I-\dfrac{y_ks_k^T}{y_k^Ts_k})+\dfrac{s_ks_k^T}{y_k^Ts_k}\]
 so\[\det B_{k+1} = \det B_{k+1}^{(BFGS)}(1+\phi_k(s_k^TB_ks_k)v_k^TH_{k+1}^{(BFGS)}v_k)\]
 and \[v^T_kH_{k+1}^{(BFGS)}v_k = v_k^T[(I-\dfrac{s_ky_k^T}{y_k^Ts_k})H_k(I-\dfrac{y_ks_k^T}{y_k^Ts_k})+\dfrac{s_ks_k^T}{y_k^Ts_k}]v_k\]
 It's easy to show that $v_k^Ts_k = s_k^Tv_k = 0$, so we have\[v^T_kH_{k+1}^{(BFGS)}v_k = v_k^TH_kv_k = (\dfrac{y_k^T}{s_k^Ty_k}-\dfrac{s_k^TB_k}{s^T_kB_ks_k})H_k(\dfrac{y_k}{y_k^Ts_k}-\dfrac{B_ks_k}{s^T_kB_ks_k})\]
 hence\[v^T_kH_{k+1}^{(BFGS)}v_k = (\dfrac{y_k^TH_k}{s_k^Ty_k}-\dfrac{s_k^T}{s^T_kB_ks_k})(\dfrac{y_k}{y_k^Ts_k}-\dfrac{B_ks_k}{s^T_kB_ks_k})= \dfrac{y_k^TH_ky_k}{(s_k^Ty_k)^2}-\dfrac{1}{s^T_kB_ks_k}\]
 so we have \[1+\phi_k(s_k^TB_ks_k)v_k^TH_{k+1}^{(BFGS)}v_k = 1+\phi_k(\mu_k-1) = 0\]
 To sum up, we know that $\det B_{k+1} = 0$.

 \subsection*{Problem.4} Using BFGS method to minimize the extended Rosenbrock function
 \[f(x) = \sum\limits_{i=1}^{n-1}[100(x_{i+1}-x_i^2)^2+(1-x_i)^2]\]with $x_0 = [-1.2,1,\cdots,-1.2,1]^T,x^*=[1,1,\cdots,1,1]^T$ and $f(x^*)=0$. Try different $n=6,8,10$ and $\epsilon = 10^{-5}$. Moreover, using BFGS method to minimize the Powell singular function \[f(x) = (x_1+10x_2)^2+5(x_3-x_4)^2+(x_2-2x_3)^4+10(x_1-x_4)^4\]with $\epsilon = 10^{-5}, x_0 = [3,-1,0,1]^T, x^*=[0,0,0,0]^T$ and $f(x^*) = 0$.

 \vspace{0.5em} 
 \textbf{Report.}\par
 We implement the BFGS method and apply it to the extended Rosenbrock function, obtaining the outputs
\begin{lstlisting}
For size n = 6
Iteration times: 54 (BFGS Method)
[[0.99999996]
 [0.99999993]
 [0.99999987]
 [0.99999974]
 [0.99999948]
 [0.99999895]]
with error: 1.2056986797122242e-06
For size n = 8
Iteration times: 57 (BFGS Method)
[[0.99999999]
 [0.99999998]
 [0.99999995]
 [0.99999991]
 [0.99999981]
 [0.99999963]
 [0.99999926]
 [0.99999852]]
with error: 1.706984961843332e-06
For size n = 10
Iteration times: 69 (BFGS Method)
[[0.99999999]
 [0.99999998]
 [0.99999998]
 [0.99999997]
 [0.99999995]
 [0.99999991]
 [0.99999983]
 [0.99999964]
 [0.99999929]
 [0.99999859]]
with error: 1.6335516973065555e-06
\end{lstlisting}
where 
\[\mathrm{error} = \| x_{\mathrm{ans}} - x^*\|_2\]
and apply it to Powell function, we get the outputs:
\begin{lstlisting}
Iteration times: 18 (BFGS Method)
[[ 9.93201866e-04]
 [-9.93542460e-05]
 [ 1.59013925e-04]
 [ 1.59062596e-04]]
with error: 0.0010231850032207386
\end{lstlisting}
We can conclude that the derivation of the BFGS method is correct.
 \end{document}
