\documentclass{article}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{graphicx}
\usepackage{stmaryrd}
\textheight=10in
\pagestyle{empty}
\usepackage[margin=1.5cm]{geometry}
\usepackage{float}

\begin{document}
\textbf{Rohith Menon (107666079)}
\section{Evaluating Regularized Polynomial Curve-fitting}
In this experiment we fit a 9th-degree polynomial to the given data (the first 20 examples from D\_train.dat). The plots show the effect of the regularization factor. As we increase the regularization, the fitted curve becomes a simpler polynomial, while at lower regularization factors the curve approaches the unregularized 9th-degree polynomial.

\begin{figure}[H]
\begin{center}$
\begin{array}{ccc}
\includegraphics[width=2in]{../working_dir/with_noise_out_size_20_lambda_-100.png} &
\includegraphics[width=2in]{../working_dir/with_noise_out_size_20_lambda_-14.png} &
\includegraphics[width=2in]{../working_dir/with_noise_out_size_20_lambda_0.png}
\end{array}$
\end{center}
\end{figure}

\section{Using Cross-validation for Model Selection}
In this experiment we fit a 9th-degree polynomial to the given data. We perform a 5-fold cross validation to obtain the average root mean square (RMSE) training and test error. We find that the test errors are higher than the training RMSE, as expected, because the test data is something the training process hasn't seen. As expected, the training error goes down as the regularization parameter is decreased. The testing error also decreases with a decreasing regularization factor, but beyond a point it starts increasing; this marks the point where the polynomial starts overfitting the data. With more examples, the errors stabilize. The average root mean square error plots for 20 examples are shown below. The true function, the training data, and the learned function are also plotted.

\begin{figure}[H]
\begin{center}$
\begin{array}{cc}
\includegraphics[width=3in]{../working_dir/with_noise_cv_size_20_avg_rmse.png} &
\includegraphics[width=3in]{../working_dir/with_noise_cv_size_20_lambda_star.png}
\end{array}$
\end{center}
\end{figure}

\newpage

\section{Evaluating the Effect of Training Sample Size}
Here we conduct the above two experiments while varying the size of the training set. We show plots for the different training-set sizes: for each size we plot both the cross-validation experiment and the regularization experiment. We see that as the training size increases, we clearly find the pattern of decreasing training RMSE with decreasing regularization, and the test RMSE follows the same trend. \textbf{We also see that the best training RMSE obtained is 0.1, and it does not go below that. This effect is due to the noise in the data.}

\begin{figure}[H]
\begin{center}$
\begin{array}{ccc}
\includegraphics[width=2in]{../working_dir/with_noise_out_size_40_lambda_-100.png} &
\includegraphics[width=2in]{../working_dir/with_noise_out_size_40_lambda_-14.png} &
\includegraphics[width=2in]{../working_dir/with_noise_out_size_40_lambda_0.png}
\end{array}$
\end{center}
\begin{center}$
\begin{array}{cc}
\includegraphics[width=3in]{../working_dir/with_noise_cv_size_40_avg_rmse.png} &
\includegraphics[width=3in]{../working_dir/with_noise_cv_size_40_lambda_star.png}
\end{array}$
\end{center}
\caption{40 training examples}
\end{figure}

\begin{figure}[H]
\begin{center}$
\begin{array}{ccc}
\includegraphics[width=2in]{../working_dir/with_noise_out_size_80_lambda_-100.png} &
\includegraphics[width=2in]{../working_dir/with_noise_out_size_80_lambda_-14.png} &
\includegraphics[width=2in]{../working_dir/with_noise_out_size_80_lambda_0.png}
\end{array}$
\end{center}
\begin{center}$
\begin{array}{cc}
\includegraphics[width=3in]{../working_dir/with_noise_cv_size_80_avg_rmse.png} &
\includegraphics[width=3in]{../working_dir/with_noise_cv_size_80_lambda_star.png}
\end{array}$
\end{center}
\caption{80 training examples}
\end{figure}

\begin{figure}[H]
\begin{center}$
\begin{array}{ccc}
\includegraphics[width=2in]{../working_dir/with_noise_out_size_160_lambda_-100.png} &
\includegraphics[width=2in]{../working_dir/with_noise_out_size_160_lambda_-14.png} &
\includegraphics[width=2in]{../working_dir/with_noise_out_size_160_lambda_0.png}
\end{array}$
\end{center}
\begin{center}$
\begin{array}{cc}
\includegraphics[width=3in]{../working_dir/with_noise_cv_size_160_avg_rmse.png} &
\includegraphics[width=3in]{../working_dir/with_noise_cv_size_160_lambda_star.png}
\end{array}$
\end{center}
\caption{160 training examples}
\end{figure}

\begin{figure}[H]
\begin{center}$
\begin{array}{ccc}
\includegraphics[width=2in]{../working_dir/with_noise_out_size_200_lambda_-100.png} &
\includegraphics[width=2in]{../working_dir/with_noise_out_size_200_lambda_-14.png} &
\includegraphics[width=2in]{../working_dir/with_noise_out_size_200_lambda_0.png}
\end{array}$
\end{center}
\begin{center}$
\begin{array}{cc}
\includegraphics[width=3in]{../working_dir/with_noise_cv_size_200_avg_rmse.png} &
\includegraphics[width=3in]{../working_dir/with_noise_cv_size_200_lambda_star.png}
\end{array}$
\end{center}
\caption{200 training examples}
\end{figure}

With more data, we find that the regularization factor first decreases and then increases. It is also interesting to note that as the training size approaches the test size, the RMSEs become almost equal. In the graphs below, the test size is always 200, taken from D\_test.dat. We find that with an increase in data size, the test RMSE decreases.
\begin{figure}[H]
\begin{center}$
\begin{array}{cc}
\includegraphics[width=3in]{../working_dir/with_noise_size_lambda.png} &
\includegraphics[width=3in]{../working_dir/with_noise_size_rmse.png}
\end{array}$
\end{center}
\caption{Regularization factor and RMSE variation with training size}
\end{figure}

\section{Evaluating the Effect of Noise}
One observation that is strikingly different from the data set with noise is that both training and test RMSE decrease with a decrease in the regularization factor. This is because, without noise, the test and training data sets follow the same underlying curve. \textbf{The training RMSE hits 0 because the data is noise-free.} Another interesting observation is that the lambda* values obtained are all very low compared to those for the data with noise.
\begin{figure}[H]
\begin{center}$
\begin{array}{ccc}
\includegraphics[width=2in]{../working_dir/without_noise_out_size_20_lambda_-100.png} &
\includegraphics[width=2in]{../working_dir/without_noise_out_size_20_lambda_-14.png} &
\includegraphics[width=2in]{../working_dir/without_noise_out_size_20_lambda_0.png}
\end{array}$
\end{center}
\begin{center}$
\begin{array}{cc}
\includegraphics[width=3in]{../working_dir/without_noise_cv_size_20_avg_rmse.png} &
\includegraphics[width=3in]{../working_dir/without_noise_cv_size_20_lambda_star.png}
\end{array}$
\end{center}
\caption{20 training examples}
\end{figure}

\begin{figure}[H]
\begin{center}$
\begin{array}{ccc}
\includegraphics[width=2in]{../working_dir/without_noise_out_size_40_lambda_-100.png} &
\includegraphics[width=2in]{../working_dir/without_noise_out_size_40_lambda_-14.png} &
\includegraphics[width=2in]{../working_dir/without_noise_out_size_40_lambda_0.png}
\end{array}$
\end{center}
\begin{center}$
\begin{array}{cc}
\includegraphics[width=3in]{../working_dir/without_noise_cv_size_40_avg_rmse.png} &
\includegraphics[width=3in]{../working_dir/without_noise_cv_size_40_lambda_star.png}
\end{array}$
\end{center}
\caption{40 training examples}
\end{figure}

\begin{figure}[H]
\begin{center}$
\begin{array}{ccc}
\includegraphics[width=2in]{../working_dir/without_noise_out_size_80_lambda_-100.png} &
\includegraphics[width=2in]{../working_dir/without_noise_out_size_80_lambda_-14.png} &
\includegraphics[width=2in]{../working_dir/without_noise_out_size_80_lambda_0.png}
\end{array}$
\end{center}
\begin{center}$
\begin{array}{cc}
\includegraphics[width=3in]{../working_dir/without_noise_cv_size_80_avg_rmse.png} &
\includegraphics[width=3in]{../working_dir/without_noise_cv_size_80_lambda_star.png}
\end{array}$
\end{center}
\caption{80 training examples}
\end{figure}

\begin{figure}[H]
\begin{center}$
\begin{array}{ccc}
\includegraphics[width=2in]{../working_dir/without_noise_out_size_160_lambda_-100.png} &
\includegraphics[width=2in]{../working_dir/without_noise_out_size_160_lambda_-14.png} &
\includegraphics[width=2in]{../working_dir/without_noise_out_size_160_lambda_0.png}
\end{array}$
\end{center}
\begin{center}$
\begin{array}{cc}
\includegraphics[width=3in]{../working_dir/without_noise_cv_size_160_avg_rmse.png} &
\includegraphics[width=3in]{../working_dir/without_noise_cv_size_160_lambda_star.png}
\end{array}$
\end{center}
\caption{160 training examples}
\end{figure}

\begin{figure}[H]
\begin{center}$
\begin{array}{ccc}
\includegraphics[width=2in]{../working_dir/without_noise_out_size_200_lambda_-100.png} &
\includegraphics[width=2in]{../working_dir/without_noise_out_size_200_lambda_-14.png} &
\includegraphics[width=2in]{../working_dir/without_noise_out_size_200_lambda_0.png}
\end{array}$
\end{center}
\begin{center}$
\begin{array}{cc}
\includegraphics[width=3in]{../working_dir/without_noise_cv_size_200_avg_rmse.png} &
\includegraphics[width=3in]{../working_dir/without_noise_cv_size_200_lambda_star.png}
\end{array}$
\end{center}
\caption{200 training examples}
\end{figure}

\begin{figure}[H]
\begin{center}$
\begin{array}{cc}
\includegraphics[width=3in]{../working_dir/without_noise_size_lambda.png} &
\includegraphics[width=3in]{../working_dir/without_noise_size_rmse.png}
\end{array}$
\end{center}
\caption{Regularization factor and RMSE variation with training size}
\end{figure}

\section{Recorded weights for lambda*}
For noise-free data, we find that the higher-order polynomial coefficients are mostly 0. This is probably because the data is noise-free and the regression learns only the necessary coefficients.\\
\newpage
\textbf{Lambda* and Weight* for data with noise}\\
\begin{tabular}{ | c | c | c | c |c| }
\hline
Size & ln(lambda*) & Train RMSE & Test RMSE & Weight* \\
\hline
20 & -9.000000 & 0.083433 & 0.112848 & -0.38, 8.61, -15.36, -1.83, 4.34, 5.17, 3.48, 0.99, -1.34, -3.03 \\
40 & -14.000000 & 0.084509 & 0.111204 & -0.52, 13.33, -40.3, 43.25, -12.99, -18.44, 5.85, 18.13, 7.47, -15.15 \\
80 & -22.000000 & 0.095222 & 0.109955 & -0.59,13.72,-19.02,-212.62,1067.83,-2012.8,1237.55,947.11,-1594.64,574.13 \\
160 & -11.000000 & 0.100924 & 0.101744 & -0.58, 13.0, -33.79, 22.63, 3.92, -6.61, -2.07, 4.42, 4.26, -4.46 \\
200 & -9.000000 & 0.102196 & 0.100842 & -0.55, 11.99, -27.02, 7.98, 10.13, 2.78, -2.25, -3.19, -1.26, 2.14 \\
\hline
\end{tabular}
\newline
\newline
\newline
\textbf{Lambda* and Weight* for data without noise} \\
\begin{tabular}{ | c | c | c | c | c | }
\hline
Size & ln(lambda*) & Train RMSE & Test RMSE & Weight* \\
\hline
20 & -28.000000 & 0.008433 & 0.006482 & -0.54, 12.3, -31.0, 19.98, 0.07, -0.2, 0.32, -0.31, 0.16, -0.04 \\
40 & -30.000000 & 0.000194 & 0.000297 & -0.54, 12.3, -31.0, 20.0, -0.0, 0.0, -0.01, 0.01, -0.0, 0.0 \\
80 & -35.000000 & 0.000239 & 0.000314 & -0.54, 12.3, -31.0, 20.0, 0.0, -0.01, 0.02, -0.02, 0.01, -0.0 \\
160 & -30.000000 & 0.000244 & 0.000297 & -0.54, 12.3, -31.0, 20.0, -0.0, 0.0, -0.01, 0.01, -0.0, 0.0 \\
200 & -35.000000 & 0.002622 & 0.002999 & -0.54, 12.3, -31.0, 20.0, -0.0, 0.01, -0.01, 0.01, -0.0, 0.0 \\
\hline
\end{tabular}

\end{document}
