function [U, V, err] = mf_gd(nos, U, V, W, lambda)
% MF_GD  Weighted matrix factorization by gradient descent.
%
% Minimizes the objective
%     ||W .* (nos - U*V')||_F^2 + lambda/2 * (||U||_F^2 + ||V||_F^2)
% with a diminishing step size, alternating gradient steps on U and V.
%
% Input:
%   nos:        N*M, matrix for training (observed entries)
%   U:          N*D, initial latent matrix on rows
%   V:          M*D, initial latent matrix on columns
%   W:          N*M, weight/mask matrix (typically 1 where nos is
%               observed, 0 elsewhere)
%   lambda:     scalar, L2 penalty on the latent factors
%
% Output:
%   U:          N*D, latent matrix on rows
%   V:          M*D, latent matrix on columns
%   err:        1*numIters, objective value at each iteration
%----------------------------------------------------------------------

numIters = 1;
% relative-change threshold for convergence
epsilon = 1e-4;
% max number of iterations
maxIters = 300;
% objective at the initial point
err(numIters) = norm(W.*(nos - U*V'), 'fro')^2 + lambda/2*(norm(U, 'fro')^2 + norm(V, 'fro')^2);
rate = inf;

while (numIters < maxIters && rate > epsilon)
    % diminishing step size: 1e-4 / k
    stepSize = (1e-4)/numIters;
    numIters = numIters + 1;

    % Gradient step on U:  dJ/dU = 2*(W.*(U*V' - nos))*V + lambda*U
    gradU = 2*(W.*(U*V' - nos))*V + lambda*U;
    U = U - stepSize * gradU;

    % Gradient step on V (uses the freshly updated U, i.e. alternating
    % descent):  dJ/dV = 2*(W.*(U*V' - nos))'*U + lambda*V
    gradV = 2*((U*V' - nos).*W)'*U + lambda*V;
    V = V - stepSize * gradV;

    % Track the objective itself. (The previous version stored the sum of
    % gradient norms here, which made err(1) and err(2:end) incomparable
    % quantities and contradicted the documented meaning of err.)
    err(numIters) = norm(W.*(nos - U*V'), 'fro')^2 + lambda/2*(norm(U, 'fro')^2 + norm(V, 'fro')^2);

    % Relative change of the objective between consecutive iterations;
    % max(..., eps) guards against division by zero at an exact fit.
    rate = abs(err(numIters-1) - err(numIters)) / max(err(numIters-1), eps);

%     disp(['iter', int2str(numIters), ', err=', num2str(err(numIters))]);
end