\documentclass[11pt]{article}
\usepackage{latexsym}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{graphicx}%\usepackage{epsfig}
%\usepackage[tight]{subfigure}
\usepackage{enumerate}
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{fullpage}
\usepackage{caption,subcaption}
\usepackage{hyperref}% loaded last, per hyperref load-order requirements

\begin{document}  

\title{ACRL Homework 2:  Helicopter Control}
\date{February 11, 2014}
\author{Eleanor Avrunin, Priya Deo, Matt Klingensmith}

\maketitle

\begin{enumerate}
\item
\begin{enumerate}
\item  Trim Controls.  For the helicopter to maintain zero velocity, we need to find the control values that set the torques and forces in the dynamics model to zero.  Assuming that the initial velocities are zero, the first three controls just need to balance the relevant \textbf{C} parameters:
\begin{align*}
  u_1 &= -\frac{\mathbf{C_p}^1}{\mathbf{C_p}^3} \\
  u_2 &= -\frac{\mathbf{C_q}^1}{\mathbf{C_q}^3} \\
  u_3 &= -\frac{\mathbf{C_r}^1}{\mathbf{C_r}^3}
\end{align*}

For the collective trim, we also need to compensate for gravity along the helicopter's $z$-axis.  The relevant angle $\theta_{\mathrm{roll}}$ is computed for us in the Matlab code.
\begin{align*}
  G_z &= mg\cos\theta_{\mathrm{roll}} \\
  u_4 &= -\frac{G_z + \mathbf{C_z}^1}{\mathbf{C_z}^3}
\end{align*}

  \begin{figure}[H]
    \centering
    \includegraphics[height=60mm]{openloop.png}
    \caption{Open-loop control with trim parameters.}
    \label{openloop}
  \end{figure}

\item Open-loop Hover.  Figure~\ref{openloop} shows the $(n, e, d)$ position of the helicopter over time, using the above trim controls.  As expected, the helicopter does not move.  (Since all values are zero, only the last-plotted curve, down, is visible.)

  \begin{figure}[H]
    \centering
    \includegraphics[height=60mm]{openloop_noise1.png}
    \includegraphics[height=60mm]{openloop_noise3.png}
    \caption{Two runs with open-loop trim control and noise.}
    \label{noise}
  \end{figure}

\item Noise.  Figure~\ref{noise} shows the helicopter's position during two runs with the same open-loop control, but with noise.  In both cases, the helicopter drifts away from the starting point because the constant control values do not compensate for the external perturbations.  Because the noise is random, the helicopter's drift differs from run to run.

\bigskip
\item  LQR Controller
\begin{enumerate}
\item Figure~\ref{K} shows the entries of the matrix $K$ as they converge.  After 1000 iterations, the changes in a single iteration are on the order of $10^{-16}$.  Most of the convergence happens in the first 100 iterations, shown in Figure~\ref{K:100}, at which point the changes in each iteration are on the order of $10^{-4}$.
  \begin{figure}[H]
    \centering
    \begin{minipage}[b]{3in}
      \centering\includegraphics[height=60mm]{K1000.png}
      \subcaption{After 1000 iterations.}\label{K:1000}
    \end{minipage}
    \begin{minipage}[b]{3in}
      \centering\includegraphics[height=60mm]{K100.png}
      \subcaption{The first 100 iterations.}\label{K:100}
    \end{minipage}
    \caption{The entries of $K$.}
    \label{K}
  \end{figure}


\item Figure~\ref{lqr_nonoise} shows the position of the helicopter using the LQR controller in an environment without noise.  The position oscillates on the scale of $10^{-16}$\,m, essentially hovering in place.

  \begin{figure}[H]
    \centering
    \includegraphics[height=60mm]{lqr_nonoise.png}
    \caption{LQR control without noise.}
    \label{lqr_nonoise}
  \end{figure}

\item Figure~\ref{lqr_noise} shows the helicopter's position using the same controller in an environment with noise.  The helicopter remains within a 2\,m radius of the starting position.  While this is worse than the noiseless performance, it is far more stable than the open-loop controller in the noisy environment.

  \begin{figure}[H]
    \centering
    \includegraphics[height=60mm]{lqr_noise.png}
    \caption{LQR control with noise.}
    \label{lqr_noise}
  \end{figure}

\end{enumerate}

\bigskip
\item  Initial Perturbation
\begin{enumerate}
\item  For displacements in ned, a starting point at each of the following locations leads to instability: $ \left[ \begin{array}{c c c} 30 & 0 & 0 \end{array} \right]^T $ for northing only, $ \left[ \begin{array}{c c c} 0 & 20 & 0 \end{array} \right]^T $ for easting only, and $ \left[ \begin{array}{c c c} 0 & 0 & 60 \end{array} \right]^T $ for down only. However, displacements in each of these axes interact, as shown in Figure \ref{perturbation}. For large displacements, the controller has inaccurate estimation of the dynamics and so tries to execute infeasible controls that end up flipping the helicopter upside down.

\begin{figure}[H]
\centering
\includegraphics[height=80mm]{perturbation.png}
\caption{Convex hull of the space of allowable perturbations that stabilize within the allowed time.}
\label{perturbation}
\end{figure}

\item  For displacements in rotation, a starting point at each of the following locations leads to instability: $ \frac{6 \pi}{8} \left[ \begin{array}{c c c} 1 & 0 & 0 \end{array} \right] $ for x-axis rotations and $ \frac{5 \pi}{8} \left[ \begin{array}{c c c} 0 & 1 & 0 \end{array} \right] $ for y-axis rotations. The helicopter can stabilize for any rotations around the z-axis, since this just requires it to turn in place. Rotations in x can cause the helicopter to start upside down and rotations in y can cause our helicopter to pitch upward and stall. In both of these scenarios, our estimate of the dynamics is grossly inaccurate. 
\\

Like displacements in ned, displacements in rotation interact as well but can also interact positively. For example, we can stabilize for an initial rotation of $ \frac{6 \pi}{8} \left[ \begin{array}{c c c} 1 & 1 & 1 \end{array} \right] $.

\end{enumerate}


\bigskip 
\item  Clipping Distance.  We chose a clipping distance of 25, which was the maximum clipping distance that allowed the helicopter to remain stable at an offset of 10,000 meters in northing.
\\

Figure~\ref{clipping} shows the helicopter's position over time with this clipping distance.  The helicopter begins by rapidly tilting toward the goal, and then levels out, maintaining a constant speed and heading.
    \begin{figure}[H]
	\centering
	\includegraphics[height=60mm]{clipping.png}
	\caption{Performance of the helicopter at 10,000 meters in northing with a clip distance of 25.}
	\label{clipping}
      \end{figure}
    
\bigskip    
\item[(f)] Latency.  We noticed that the helicopter rapidly became unstable with latency greater than 3 steps. The controller oscillated, causing a positive feedback loop to take control and flip the helicopter over.  Figures~\ref{latency:q} and~\ref{latency:u} show the resulting orientation and controls, respectively.

 \begin{figure}[H]
   \centering
    \begin{minipage}[b]{3in}
      \centering\includegraphics[height=60mm]{latency_q.png}
      \subcaption{Quaternion (orientation of the helicopter).}\label{latency:q}
    \end{minipage}
    \begin{minipage}[b]{3in}
      \centering\includegraphics[height=60mm]{latency_u.png}
      \subcaption{Controls of the helicopter.}\label{latency:u}
    \end{minipage}
    \caption{Resulting orientation and control of the helicopter with 3 steps of latency.}
    \label{latency}
  \end{figure}

\bigskip
\item[(g)] Policy Search Controller. We used the Nelder-Mead search algorithm to build a black-box controller for the helicopter hovering problem. For a given policy, we generated $n$ trajectories and computed a cost associated with each trajectory. For the trajectory cost, we used the accumulation of the quadratic cost function that was used for LQR at each timestep. We noticed that the R matrix in the LQR formulation was all zeros, so we left it out of our cost formulation. Thus we get the following cost function:
\begin{equation*}
\operatorname{Cost}(\xi) = \sum_t x_t^T Q x_t
\end{equation*}
The controller then optimized the average cost of the $n$ generated trajectories over the policy parameters.
\\

We started with a policy that was a linear function of the state.  To reduce the dimensionality of the problem for optimization, we fixed the first 8 weights and last weight for each of the controls. These correspond to the weights on the previous controls, the change in previous controls, and the first component of the quaternion. We knew that the weights on the change in previous controls and the first component of the quaternion should be zero.  For the weights on the previous controls, we preserved the weights produced by the LQR algorithm.  We also initialized the other policy parameters using the results of the LQR controller, since that should be a good starting point for a linear policy.
\\

We initially tried doing noisy rollouts with the helicopter starting at the hover state.  However, the $K_{ss}$ produced by LQR is already a good controller for that problem, so the cost function is both already near an optimum and fairly flat, and the Nelder-Mead optimization does not really move.  Instead, we tried to improve the performance when the helicopter's initial position is perturbed from the target state.  We generated a set of random start positions and orientations, and for every policy ran a noiseless rollout from each of those positions.
\\

We also tried a non-linear policy using the control equations derived from the neural network presented by Ng et al.~\cite{ng}. There were 31 parameters for the neural network, which we learned via the Nelder-Mead search algorithm. Since we didn't have a good estimate as a starting point, and the Nelder-Mead algorithm is prone to getting stuck in local minima, we optimized the policy using random restarts.
\\

For either of these policies, the optimization is very slow.  We reimplemented the helicopter dynamics and simulator and our rollout and policy functions in C++, and ran \texttt{nlopt} on the nonlinear policy.
\\




Another potential controller that we considered (but did not have time to implement) was to find a trajectory from whatever the helicopter's starting position is to the target hover state by optimizing controls over an initial generated trajectory in a noiseless environment.  We would then perform ILQR along that trajectory, which would enable us to robustly follow that path even with noise.  Since each trajectory would be planned specifically for that run's initial state, this could deal with arbitrary perturbations of the initial position and orientation.  Unfortunately, it would be very difficult to get the trajectory-finding part to work fast enough for this to be a practical helicopter controller.

%The linear policy was difficult to optimize because of the large parameter space. Even with limiting the parameter space, the optimization takes a long time to move from its starting position.

%The final linear policy performs better than the LQR controller in that it is more robust to noise and perturbations. This is because we optimize the LQR controller weights using the full non-linear model and we simulate noise in the rollouts. This helps us get a good estimate of the quality of the parameters with respect to the data similar to what we are actually testing on. The LQR controller linearises the dynamics and assumes no noise, so when the assumptions are bad we end up in a different distribution of states and are unable to follow the optimal trajectory. The linear policy does not make the same assumptions, and is better able to model subsequent states and generate a trajectory that the robot can actually follow. 

\bigskip
\item[(h)] Controller with Latency.  We intended to modify our optimization to do rollouts with latency, which would create controllers meant to deal with latency.  We think this optimization would reduce weights on terms in the state that fluctuate rapidly over time.  Because the optimization is looking at the costs of rollouts with latency it can change parameters to affect the overall performance, and we would not need to modify the state or the cost function to explicitly consider latency.
\\

Unfortunately, we ran out of time to re-run the optimization and get new controllers.  When we run our controllers from part (g) with latency, we see....


\end{enumerate}
\bigskip

The file \texttt{Helicopter.mp4} shows a visualization of the behavior of the helicopter for conditions (b) through (f) discussed above.
\end{enumerate}

\begin{thebibliography}{}
\bibitem{ng} Autonomous helicopter flight via reinforcement learning. A. Y. Ng, H. J. Kim, M. I. Jordan, and S. Sastry. In S. Thrun, L. Saul, and B. Schoelkopf (Eds.), \textit{Advances in Neural Information Processing Systems (NIPS) 16}, 2004.
\end{thebibliography}

\end{document}