\documentclass{article}

\usepackage[paperwidth=18cm, paperheight=11.8cm,top=0.1cm,bottom=0.1cm,left=0.1cm,right=0.1cm]{geometry}
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{amsmath}
\pagenumbering{gobble}
\begin{document}
\begin{algorithm}
	\caption{Advantage Actor-Critic (Episode Based)}
	\begin{algorithmic}[1]
		\STATE Randomly initialize policy network $\pi_\theta$: $\theta \gets \theta_0$, value function network $V_\phi$: $\phi \gets \phi_0$.
		\STATE Initialize rollout buffer $\mathcal{B}$ to empty.
		\FOR{$k=1,2,\ldots,K$}
		\STATE Reset environment.
		\STATE Generate an episode following $\pi_\theta$ until it terminates or exceeds $T$ steps. At each time step, observe $\{r_{t}, s_{t+1},d_t\}$, and then store transition $\{s_t,a_t,r_{t},s_{t+1},d_{t}\}$ in $\mathcal{B}$.

		\STATE Take out all transitions $\{s_i,a_i,r_i,s_i^\prime,d_i\}$ from $\mathcal{B}$.
		\STATE Compute advantage at each time step based on $V_\phi$ using any advantage estimation method (such as \textbf{TD Residual}):
		\[
			A_i=r_i+\gamma V_{\phi}(s_i^\prime)(1-d_i)-V_{\phi}(s_i),\quad i=1,2,\dots,|\mathcal{B}|.
		\]
		\STATE Compute critic loss based on TD error, and update parameter $\phi$ via any gradient \textbf{descent} algorithm:
		\[
			\begin{aligned}
				L_V  & =\sum_{i=1}^{|\mathcal{B}|} A_i^2 \\
				\phi & \gets\phi-\beta\nabla_\phi L_V
			\end{aligned}
		\]
		\STATE Compute actor loss based on \textbf{Policy Gradient}, and update parameter $\theta$ via any gradient \textbf{ascent} algorithm:
		$$
			\begin{aligned}
				L_\pi  & =\sum_{i=1}^{|\mathcal{B}|}\log\pi(a_i|s_i)_i\cdot A_i \\
				\theta & \gets \theta+\alpha\nabla_\theta L_\pi
			\end{aligned}
		$$
		\STATE Clear rollout buffer $\mathcal{B}$ to empty.
		\ENDFOR
	\end{algorithmic}
\end{algorithm}
\end{document}
