\documentclass{article}

\usepackage[paperwidth=16cm, paperheight=8.7cm,top=0.1cm,bottom=0.1cm,left=0.1cm,right=0.1cm]{geometry}
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{amsmath}
\pagenumbering{gobble}
\begin{document}
\begin{algorithm}
	\caption{REINFORCE (Episode Based)}
	\begin{algorithmic}[1]
		\STATE Randomly initialize policy network $\pi_\theta$: $\theta\gets \theta_0$.
		\STATE Initialize rollout buffer $\mathcal{B}$ to empty.


		\FOR{$k=1,2,\dots,K$}
		\STATE Generate an episode by following the policy $\pi_{\theta}$; the episode may end with either termination or truncation. At each time step, store the transition $\{s_t,a_t,r_{t},s_{t+1},p_t\}$, where $p_t=\pi_\theta(a_t|s_t)$, in $\mathcal{B}$.
		\STATE Compute the return $G_t$ at each time step using the $\gamma$-discounted reward (working backward from the end of the episode):
		\[
			G_t=
			\begin{cases}
				r_{t},                & t=|\mathcal{B}|            \\
				r_{t}+\gamma G_{t+1}, & t=|\mathcal{B}|-1,\dots,1
			\end{cases}
		\]

		\STATE Compute the policy gradient, and update the policy network parameter $\theta$ via any gradient \textbf{ascent} algorithm:
		\[
			\begin{aligned}
				\nabla_\theta J(\theta) & = \sum_{t=1}^{|\mathcal{B}|}\left[\nabla_\theta \log p_t \cdot G_t \right], \\
				\theta                  & \gets \theta + \alpha \cdot \nabla_\theta J(\theta).
			\end{aligned}
		\]

		\STATE Clear the rollout buffer $\mathcal{B}$.
		\ENDFOR

	\end{algorithmic}
\end{algorithm}
\end{document}
