function [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters)
	%GRADIENTDESCENT Perform batch gradient descent to learn theta.
	%	[theta, J_history] = GRADIENTDESCENT(X, y, theta, alpha, num_iters)
	%	updates theta by taking num_iters gradient steps with learning
	%	rate alpha, minimizing the linear-regression cost over (X, y).
	%
	%	Inputs:
	%		X         - m-by-n design matrix (one training example per row)
	%		y         - m-by-1 vector of target values
	%		theta     - n-by-1 initial parameter vector
	%		alpha     - scalar learning rate
	%		num_iters - number of gradient descent iterations
	%	Outputs:
	%		theta     - learned n-by-1 parameter vector
	%		J_history - num_iters-by-1 vector of the cost after each step

	%	Preallocate the history of the computed cost
	J_history = zeros(num_iters, 1);
	%	Number of training examples
	m = length(y);
	%	Train theta: vectorized update theta := theta - (alpha/m) * X' * (X*theta - y).
	%	Note: `theta = theta - ...` is used instead of the Octave-only `-=`
	%	operator so the code also runs under MATLAB.
	for iter = 1:num_iters
		theta = theta - alpha * X' * (X * theta - y) / m;
		%	Record the cost after this update for convergence diagnostics
		J_history(iter) = computeCost(X, y, theta);
	end
end
