\documentclass{article}

\usepackage[paperwidth=20cm, paperheight=16.2cm,top=0.1cm,bottom=0.1cm,left=0.1cm,right=0.1cm]{geometry}
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{amsmath}
\pagenumbering{gobble}
\begin{document}
\begin{algorithm}
    \caption{\textbf{T}win \textbf{D}elayed \textbf{DD}PG (Timestep Based)}
    \begin{algorithmic}[1]
        \STATE Randomly initialize policy network $\pi_\theta$: $\theta\gets \theta_0$, and action-value networks $Q_{\phi_1}$,$Q_{\phi_2}$: $\phi_1 \gets \phi_{1_0},\ \phi_2 \gets \phi_{2_0}$.
        \STATE Initialize three target networks with the parameter copies: $\theta^\prime\gets \theta,\ \phi_1^\prime\gets \phi_1,\ \phi_2^\prime\gets \phi_2$.
        \STATE Initialize replay buffer $\mathcal{B}$, and collect some transitions using random or initial policy before training starts.
        \FOR{$t=1,2,\ldots,T$}
        \STATE Take an action with noise $a_t= \pi_{\theta}(s_t)+\epsilon_t,\quad \epsilon_t \sim \mathcal{N}(0,\sigma)$.
        \STATE Observe $\{r_{t},s_{t+1},d_t\}$, and store transition $\{s_t,a_t,r_{t},s_{t+1},d_t\}$ in $\mathcal{B}$. If the episode terminates, reset environment and continue.
        \STATE Randomly sample a minibatch $B$ with transitions $\{s_i,a_i,r_i,s_i^\prime,d_i\}_{i=1,2,\dots,|B|}$ from $\mathcal{B}$.
        \STATE Compute action-value loss, and update action-value network parameters via any gradient \textbf{descent} algorithm:
        \[
            \begin{aligned}
                \tilde{a}_i^\prime & = \pi_{\theta^\prime}(s_i^\prime)+\epsilon_i,\quad \epsilon_i \sim \mathrm{clip}(\mathcal{N}(0,\sigma),-c,c) \\
                y_i                & = r_i+(1-d_i)\cdot \gamma  \min_{j=1,2} Q_{\phi_j^\prime}(s_i^\prime,\tilde{a}_i^\prime)                     \\
                L_{Q_j}            & = \frac{1}{|B|}\sum_{i=1}^{|B|}[y_i-Q_{\phi_j}(s_i,a_i)]^2,\quad j=1,2                                       \\
                \phi_j             & \gets \phi_j-\nabla_{\phi_j}{L_{Q_j}},\quad j=1,2
            \end{aligned}
        \]
        \IF{$t \bmod \mathrm{delay\_steps} = 0$}
        \STATE Compute policy loss, and update policy network parameter $\theta$ according to \textbf{Deterministic Policy Gradient Theorem} via any gradient \textbf{ascent} algorithm:
        \[
            \begin{aligned}
                L_{p}               & =J(\theta)=\frac{1}{|B|}\sum_{i=1}^{|B|} Q_{\phi_1}(s_i,\tilde{a}_i)|_{\tilde{a}_i=\pi_{\theta}(s_i)}                                 \\
                \nabla_\theta L_{p} & =\frac{1}{|B|}\sum_{i=1}^{|B|} \nabla_{a} Q_{\phi_1}(s_i,\tilde{a}_i)|_{\tilde{a}_i=\pi_{\theta}(s_i)}\nabla_\theta \pi_{\theta}(s_i) \\
                \theta              & \gets \theta+\nabla_\theta L_{p}
            \end{aligned}
        \]
        \STATE Update three target network parameters via soft sync (usually $\tau$ is a very small decimal):
        \[
            \begin{aligned}
                \theta^\prime & \gets \tau\theta+(1-\tau)\theta^\prime               \\
                \phi_j^\prime & \gets \tau\phi_j + (1-\tau)\phi_j^\prime,\quad j=1,2
            \end{aligned}
        \]
        \ENDIF
        \ENDFOR
    \end{algorithmic}
\end{algorithm}
\end{document}
