%written by Marcos
function [ w ] = logregnew( X,y,lambda )
% LOGREGNEW  Fit L2-regularized logistic regression by Newton's method.
%   w = LOGREGNEW(X, y, lambda) returns the weight vector w that maximizes
%   the penalized log-likelihood
%       L(w) = y'*(X*w) - sum(log(1+exp(X*w))) - (lambda/2)*(w'*w)
%   for labels y in {0,1}. Newton steps are taken while they increase L;
%   when a Newton step would decrease L, the iteration falls back to a
%   line-searched gradient-ascent step instead.
%
%   Inputs:
%     X      - n-by-p design matrix
%     y      - n-by-1 label vector with entries in {0,1}
%     lambda - L2 penalty strength (lambda > 0 keeps the optimum finite)
%   Output:
%     w      - p-by-1 fitted weight vector
    s = @(t) 1./(1+exp(-t)); % sigmoid function
    % Penalized log-likelihood, written in the algebraically equivalent form
    % -(1-y)'*(X*w) - sum(log(1+exp(-X*w))) - (lambda/2)*w'*w.
    % (Inlined here instead of calling an external helper so the function is
    % self-contained; its gradient and Hessian are G and H below.)
    L = @(w) -(1-y)'*(X*w) - sum(log(1+exp(-X*w))) - (lambda/2)*(w'*w);
    G = @(w) X'*(y-s(X*w)) - lambda*w;                               % gradient of L
    H = @(w) -X'*diag(s(X*w).*(1-s(X*w)))*X - lambda*eye(size(X,2)); % Hessian of L

    w = zeros(size(X,2),1);
    maxIter = 1000; % safeguard against non-termination
    iter = 0;
    % Iterate until the gradient (first-order optimality condition) vanishes.
    while norm(G(w)) > 1e-6 && iter < maxIter
        iter = iter + 1;
        % Newton direction. H is negative definite (we are maximizing a
        % strictly concave function), so the ascent step is w - d.
        d = H(w)\G(w);
        % BUG FIX: the original code updated w to w-d BEFORE this test, so
        % the comparison was made at the wrong point and the accepting
        % branch then applied the step a second time (landing at w - 2d).
        % Here the candidate step is only committed after it is accepted.
        if L(w-d) < L(w)
            % Newton step would decrease L: revert to gradient ascent with
            % a scalar line search over step sizes a in [0,1].
            g = -G(w);
            F = @(a) -L(w - a*g); % negated so fminbnd's minimum maximizes L
            a = fminbnd(F, 0, 1);
            w = w - a*g;
        else
            w = w - d; % accept the Newton step
        end
    end
end
