\documentclass{article}

\usepackage[paperwidth=19cm, paperheight=17cm,top=0.1cm,bottom=0.1cm,left=0.1cm,right=0.1cm]{geometry}
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{amsmath}
\pagenumbering{gobble}
\begin{document}
\begin{algorithm}
    \caption{\textbf{D}eep \textbf{D}eterministic \textbf{P}olicy \textbf{G}radient Algorithm}
    \begin{algorithmic}[1]
        \STATE Randomly initialize policy network $\pi_\theta$: $\theta\gets \theta_0$ and action-value network $Q_\phi$: $\phi\gets \phi_0$.
        \STATE Initialize policy network target $\pi_{\theta^\prime}$:  $\theta^\prime\gets \theta$ and action-value network target $Q_{\phi^\prime}$: $\phi^\prime\gets \phi$ using parameter copies.
        \STATE Initialize replay buffer $\mathcal{B}$, and collect some transitions using random or initial policy before training starts.
        \FOR{$k=1,2,\ldots,K$ }
        \STATE Reset environment.
        \FOR{$t=1,2,\ldots,T$}
        \STATE Take an action with noise $a_t=\pi_\theta(s_t)+\epsilon_t,\quad \epsilon_t \sim \mathcal{N}(0,\sigma)$.
        \STATE Observe $\{r_{t},s_{t+1},d_t\}$ and store transition $\{s_t,a_t,r_{t},s_{t+1},d_t\}$ in $\mathcal{B}$
        \STATE Randomly sample a minibatch $B$ with transitions $\{s_i,a_i,r_i,s_i^\prime,d_i\}_{i=1,2,\ldots,|B|}$ from $\mathcal{B}$
        \STATE Compute action-value loss:
        \[
            L_{Q}=\frac{1}{|B|}\sum_{i=1}^{|B|}\left[ r_i+(1-d_i)\cdot \gamma Q_{\phi^\prime}(s_i^\prime,a_i^\prime)|_{a_i^\prime=\pi_{\theta^\prime}(s_i^\prime)}-Q_\phi(s_i,a_i) \right]^2
        \]
        \STATE Update action-value network parameters via any gradient \textbf{descent} algorithm:
        \[
            \phi \gets \phi - \nabla_\phi L_{Q}
        \]
        \STATE Compute policy loss:
        \[
            L_{p}=\frac{1}{|B|}\sum_{i=1}^{|B|} Q_\phi(s_i,\tilde{a}_i)|_{\tilde{a}_i=\pi_{\theta}(s_i)}
        \]

        \STATE Update policy network parameters according to \textbf{Deterministic Policy Gradient Theorem} via any gradient \textbf{ascent} algorithm:
        \[
            \begin{aligned}
                \nabla_\theta L_{p} & =\frac{1}{|B|}\sum_{i=1}^{|B|}\nabla_a Q_\phi(s_i,\tilde{a}_i)|_{\tilde{a}_i=\pi_\theta(s_i)} \nabla_\theta \pi_\theta(s_i) \\
                \theta              & \gets \theta + \nabla_\theta L_{p}
            \end{aligned}
        \]
        \STATE Update two target networks parameters via soft update (usually $\tau$ is a small decimal):
        \[
            \begin{aligned}
                \theta^\prime & \gets \tau\theta + (1-\tau)\theta^\prime \\
                \phi^\prime   & \gets \tau\phi + (1-\tau)\phi^\prime
            \end{aligned}
        \]

        \IF{episode terminates}
        \STATE break
        \ENDIF

        \ENDFOR
        \ENDFOR




    \end{algorithmic}
\end{algorithm}
\end{document}
