\documentclass{article}

\usepackage[paperwidth=20cm, paperheight=15.6cm,top=0.1cm,bottom=0.1cm,left=0.2cm,right=0.2cm]{geometry}
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{amsmath}
\pagenumbering{gobble}
\begin{document}
\begin{algorithm}
	\caption{Proximal Policy Optimization (Timestep Based)}
	\begin{algorithmic}[1]
		\STATE Randomly initialize policy network $\pi_\theta$: $\theta\gets \theta_0$, value function network $V_\phi$: $\phi\gets \phi_0$.
		\STATE Initialize rollout buffer $\mathcal{B}$ to empty.
		\REPEAT
		\STATE Collect a trajectory by following $\pi_{\theta_{old}}$ until at least $T$ time steps have been gathered. At each time step, observe $\{r_t,s_{t+1},d_t\}$ and store the transition $\{s_t,a_t,r_t,s_{t+1},d_t,p_t\}$, where $p_t=\pi_{\theta_{old}}(a_t|s_t)$, in $\mathcal{B}$. If an episode terminates early, reset the environment and continue.
		\STATE Compute the return $G_t$ at each time step using the $\gamma$-discounted reward:
		\[
			G_t  =r_{t}+\gamma r_{t+1}+\gamma^2r_{t+2}+\dots+\gamma^{T-t-1}r_{T-1}=r_{t}+\gamma G_{t+1}
		\]
		and append $G_t$ to corresponding transition.
		\STATE Compute the advantage $\hat{A}_t$ at each time step based on $V_{\phi}$ using any advantage estimation method (such as \textbf{GAE}):
		\[
			\begin{aligned}
				\delta_t  & =r_{t}+(1-d_{t})\cdot\gamma V_{\phi}(s_{t+1})-V_{\phi}(s_t)                                                                 \\
				\hat{A}_t & =\delta_t+(\gamma\lambda)\delta_{t+1}+\dots+(\gamma\lambda)^{T-t-1}\delta_{T-1}=\delta_t+(\gamma\lambda)\hat{A}_{t+1}
			\end{aligned}
		\]
		and append $\hat{A}_t$ to corresponding transition.
		\FOR{$e=0,1,2,\ldots,E$}
		\STATE Randomly sample a minibatch $B$ with transitions $\{ s_i,a_i,r_i,s_i^\prime,p_i,G_i,\hat{A}_i \}$ from $\mathcal{B}$.
		\STATE Compute the policy loss, and update the policy network parameter $\theta$ via any gradient \textbf{ascent} algorithm:
		\[
			L_p=\frac{1}{|B|}\sum_{i=1}^{|B|}\min\left[\frac{\pi_\theta (a_i|s_i)}{p_i}\hat{A}_i,\  \mathrm{clip}\left(\frac{\pi_\theta (a_i|s_i)}{p_i},1-\epsilon,1+\epsilon\right)\hat{A}_i\right]
		\]
		\[
			\theta\gets\theta+\alpha \nabla_\theta L_p
		\]

		\STATE Compute the value function loss, and update the value function network parameter $\phi$ via any gradient \textbf{descent} algorithm:
		\[
			L_V=\frac{1}{|B|}\sum_{i=1}^{|B|}[G_i-V_{\phi}(s_i)]^2
		\]
		\[
			\phi\gets\phi-\beta \nabla_\phi L_V
		\]

		\ENDFOR
		\STATE Clear rollout buffer $\mathcal{B}$ to empty.
		\STATE $\pi_{\theta_{old}}\gets \pi_\theta$

		\UNTIL{convergence}
	\end{algorithmic}
\end{algorithm}
\end{document}
