\documentclass{article}

\usepackage[paperwidth=20cm, paperheight=18.2cm,top=0.1cm,bottom=0.1cm,left=0.2cm,right=0.2cm]{geometry}
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{amsmath}
\pagenumbering{gobble}
\begin{document}
\begin{algorithm}
    \caption{\textbf{S}oft \textbf{A}ctor-\textbf{C}ritic (Timestep Based)}
    \begin{algorithmic}[1]
        \STATE Randomly initialize policy network $\pi_\theta$: $\theta\gets\theta_0$, action-value networks $Q_{\phi_1}$,$Q_{\phi_2}$: $\phi_1\gets \phi_{1_0}$, $\phi_2\gets \phi_{2_0}$.
        \STATE Initialize two target networks $Q_{\phi_1^\prime}$,$Q_{\phi_2^\prime}$ using parameter copies: $\phi_1^\prime\gets \phi_1$, $\phi_2^\prime\gets \phi_2$.
        \STATE Initialize entropy coefficient $\alpha$ to a small positive value (such as $0.1$).
        \STATE Initialize replay buffer $\mathcal{B}$, and collect some transitions using initial or random policy before training starts.

        \REPEAT
        \FOR{$t=1,2,\ldots,T$}
        \STATE Take an action sampled from $\pi_\theta$: $a_t\sim \pi_{\theta}(\cdot|s_t)$.
        \STATE Observe $\{r_{t},s_{t+1},d_t\}$, and store transition $\{s_t,a_t,r_{t},s_{t+1},d_t\}$ in $\mathcal{B}$. If episode ends, reset environment and continue.
        \ENDFOR
        \FOR{$e=1,2,\ldots,E$}
        \STATE Randomly sample a minibatch $B$ with transitions $\{s_i,a_i,r_i,s_i^\prime,d_i\}_{i=1,2,\ldots,|B|}$ from $\mathcal{B}$.
        \STATE Compute action-value loss, and update action-value network parameters $\phi$ via any gradient \textbf{descent} algorithm:
        \[
            \begin{aligned}
                y_i     & =r_i+(1-d_i)\cdot \gamma\left(\min_{j=1,2} Q_{\phi_j^\prime}(s_i^\prime,\tilde{a}_i^\prime)-\alpha \log \pi_\theta(\tilde{a}_i^\prime|s_i^\prime)\right),\quad {\tilde{a}_i^\prime \sim \pi_{\theta}(\cdot|s_i^\prime)} \\
                L_{Q_j} & =\frac{1}{|B|}\sum_{i=1}^{|B|}\left( Q_{\phi_j}(s_i,a_i)-y_i \right)^2,\quad j=1,2                                                                                                                                      \\
                \phi_j  & \gets \phi_j-\lambda_{Q}\nabla_{\phi_j} L_{Q_j},\quad j=1,2
            \end{aligned}
        \]
        \STATE Compute policy loss, and update policy network parameter $\theta$ via any gradient \textbf{ascent} algorithm:
        \[
            \begin{aligned}
                L_\pi  & =\frac{1}{|B|}\sum_{i=1}^{|B|}\left(\min_{j=1,2} Q_{\phi_j}(s_i,\tilde{a}_i)-\alpha \log \pi_\theta(\tilde{a}_i|s_i)\right),\quad \tilde{a}_i \sim \pi_{\theta}(\cdot|s_i) \\
                \theta & \gets \theta+\lambda_{\pi}\nabla_{\theta} L_{\pi}
            \end{aligned}
        \]
        where $\tilde{a}_i$ is sampled via the \textbf{reparameterization trick} (e.g., for a Normal distribution, $\tilde{a}_i=\mu_\theta(s_i)+\sigma_\theta(s_i)\odot\epsilon$, $\epsilon\sim\mathcal{N}(0,I)$) so that gradients can propagate through the sampling step.
        \STATE Compute entropy coefficient loss, and update entropy coefficient $\alpha$ via any gradient \textbf{descent} algorithm:
        \[
            \begin{aligned}
                L_\alpha & =\frac{1}{|B|}\sum_{i=1}^{|B|}\left(-\alpha\log\pi_\theta (\tilde{a}_i|s_i)-\alpha\mathcal{H}_0\right),\quad \tilde{a}_i \sim \pi_{\theta}(\cdot|s_i) \\
                \alpha   & \gets \alpha-\lambda_\alpha \nabla_\alpha L_\alpha
            \end{aligned}
        \]
        where $\mathcal{H}_0$ is the given target entropy.
        \STATE Update target network parameters $\phi^\prime$ via soft sync (usually $\tau$ is a small positive constant, such as $0.01$):
        \[
            \phi_j^\prime\gets\tau\phi_j+(1-\tau)\phi_j^\prime,\quad j=1,2
        \]
        \ENDFOR
        \UNTIL{convergence}





    \end{algorithmic}
\end{algorithm}
\end{document}
