function [U,R] = grouse(S,size_arr,maxCycles,step_size,maxrank,Startnum,Uinit)
% GROUSE: Grassmannian Rank-One Update Subspace Estimation.
% Incrementally estimates a low-rank column subspace U from the revealed
% (nonzero) entries of S by stochastic gradient descent on the Grassmannian,
% then solves per-column least squares for the weight matrix R.
%
%  Inputs:
%       S         = data matrix; zero entries are treated as unobserved
%       size_arr  = with Startnum, selects how many trailing columns of S
%                   to use (ignored when Startnum == -1)
%       maxCycles = number of passes over the data
%       step_size = the constant for the stochastic gradient descent step size
%       maxrank   = your guess for the rank
%       Startnum  = -1 to use all columns of S; otherwise the last
%                   (size_arr + Startnum) columns of S are used
%       Uinit     = an initial guess for the column space U (optional)
%  Outputs:
%       U and R such that U*R' approximates the observed entries of S.

if (Startnum==-1)
    numc=size(S,2);
    values = sparse(S(1:end,1:end));
else
    numc=size_arr+Startnum;
    % Form some sparse matrices for easier matlab indexing
    values = sparse(S(1:end,end+1-numc:end));
end
numr=size(S,1);
% Sparse logical mask of which entries are observed (nonzero).
Indicator = sparse(values~=0);

%
% Main Algorithm
%
if (nargin<7)
    % Uinit (7th argument) not supplied: initialize U to a random
    % maxrank-dimensional orthonormal subspace.
    % BUG FIX: this previously tested nargin<6, so calling with exactly
    % six arguments (omitting the optional Uinit) fell through to
    % "U = Uinit" and raised an undefined-variable error.
    U = orth(randn(numr,maxrank));
else
    U = Uinit;
end

for outiter = 1:maxCycles
    % Create a random ordering of the columns for the current pass over the data.
    col_order = randperm(numc);
    for k=1:numc
        % Pull out the relevant indices and revealed entries for this column.
        idx = find(Indicator(:,col_order(k)));
        if (length(idx)<5)
            % Too few observed entries to give a meaningful update; skip.
            continue;
        else
            v_Omega = values(idx,col_order(k));
            U_Omega = U(idx,:);
            % Predict the best approximation of v_Omega by U_Omega.
            % That is, find weights to minimize ||U_Omega*weights-v_Omega||^2.
            weights = U_Omega\v_Omega;
            norm_weights = norm(weights);
            % Compute the residual not predicted by the current estimate of U.
            residual = v_Omega - U_Omega*weights;
            norm_residual = norm(residual);
            % This step-size rule is given by combining Edelman's geodesic
            % projection algorithm with a diminishing step-size rule from
            % SGD; the denominator is the global iteration count so steps
            % shrink over time. A different step size rule could suffice.
            sG = norm_residual*norm_weights;
            t = step_size*sG/( (outiter-1)*numc + k );
            % Take the geodesic gradient step.
            if t<pi/2 % drop big steps
                alpha = (cos(t)-1)/norm_weights^2;
                beta = sin(t)/sG;
                % Rank-one update of U along the geodesic direction; only
                % the observed rows receive the residual component.
                step = U*(alpha*weights);
                step(idx) = step(idx) + beta*residual;
                U = U + step*weights';
            end
        end
    end
end

% Once we have settled on our column space, a single pass over the data
% suffices to compute the weights associated with each column.  You only
% need to compute these weights if you want to make predictions about these
% columns.

R = zeros(numc,size(U,2));
for k=1:numc
    % Pull out the relevant indices and revealed entries for this column.
    idx = find(Indicator(:,k));
    v_Omega = values(idx,k);
    U_Omega = U(idx,:);
    % Solve a simple least squares problem to populate row k of R.
    R(k,:) = (U_Omega\v_Omega)';
end
end