function [J, grad] = lrCostFunction(theta, X, y, lambda)
    %LRCOSTFUNCTION Compute cost and gradient for regularized logistic regression.
    %   [J, grad] = LRCOSTFUNCTION(theta, X, y, lambda) returns the
    %   cross-entropy cost J and its gradient grad for parameters theta,
    %   design matrix X (m x n, first column assumed to be the bias/intercept
    %   column), labels y (m x 1, values in {0, 1}), and regularization
    %   strength lambda. The bias term theta(1) is NOT regularized, per
    %   standard convention.

    % Number of training examples
    m = length(y);

    % Hypothesis: compute the sigmoid activation once and reuse it for both
    % the cost and the gradient (the original recomputed it three times).
    h = sigmoid(X * theta);

    % Regularized cross-entropy cost; the penalty sums theta(2:end).^2 so the
    % intercept is excluded, scaled by lambda / (2m).
    J = (-y' * log(h) - (1 - y)' * log(1 - h)) / m ...
        + lambda / (2 * m) * (theta(2:end)' * theta(2:end));

    % Unregularized gradient in a single matrix product, then add the
    % regularization term for every parameter except the bias.
    grad = X' * (h - y) / m;
    grad(2:end) = grad(2:end) + lambda / m * theta(2:end);

    % Return as a column vector regardless of the shape of theta.
    grad = grad(:);
end
