function [model, infos] = low_rank_optimization(algofun, data, model, params, test)
% [model, infos] = low_rank_optimization(algofun, data, model, params, test)
% 
% Solves "minimize f(X) + \lambda ||X||_*" for fixed lambda.
% See the tech report http://arxiv.org/abs/1112.2318.
% Once lambda (the regularization parameter) is fixed, the optimization is
% done using low-rank optimization: the rank is increased one at a time and
% all iterates are kept low-rank. Finally, an optimality certificate is
% provided by computing the duality gap.
%
% Parameters:
%   algofun                    function handle on the trust-region algorithm,
%                              invoked as algofun(data_ts, data_ls, model, params)
%
%   data.ls.rows               vector containing first indices of the points
%   data.ls.cols               vector containing second indices of the points
%   data.ls.entries            vector of known entries
%   data.ts                    structure array for testing (if not empty)
%   (model.U,model.B,model.V)  initial point (a random point of rank model.p
%                              is generated if any of the three is missing)
%   params                     structure array containing algorithm parameters
%                              (see default_params for details)
%   test                       unused in this function; kept for interface
%                              compatibility with callers
%
% Output:
%   model        structure array of low-rank polar factors
%   infos        structure array with additional information
%
%
% Authors:
% Bamdev Mishra <b.mishra@ulg.ac.be>

    
% Unpack the algorithm parameters used directly by this driver.
verb = params.verb;               % verbosity flag
ls_max_iter = params.ls_max_iter; % max backtracking steps for the rank-1 line-search
dg_tol = params.dg_tol;           % absolute duality-gap tolerance
dg_vtol = params.dg_vtol;         % relative duality-gap tolerance

d1 = model.d1;     % number of rows of X
d2 = model.d2;     % number of columns of X
pmax = model.pmax; % maximum rank tried before giving up

% Sparse skeleton with the sparsity pattern of the observed entries. It is
% later overwritten in place (see updateSparse below) to hold the residual,
% which serves as the dual variable candidate.
if isfield(model, 'sparse_structure'),
    sparse_structure = model.sparse_structure;
else
    warning('WarnTests:convertTest',...
                'You did not supply the "sparse_structure" field in "model".\nCreating a sparse skeleton, "model.sparse_structure" for efficiency.\n');
    sparse_structure = sparse(data.ls.rows,data.ls.cols,data.ls.entries,model.d1,model.d2);
    model.sparse_structure = sparse_structure;  % creating a field
end

% Second sparse skeleton; not used in this function, but downstream routines
% (e.g. the trust-region algorithm called via algofun) presumably rely on
% it — TODO confirm against those callers.
if ~isfield(model, 'sparse_structure2'),
    warning('WarnTests:convertTest',...
                'You did not supply the "sparse_structure2" field in "model".\nCreating a sparse skeleton, "model.sparse_structure2" for efficiency.\n');
    sparse_structure2 = sparse(data.ls.rows,data.ls.cols,data.ls.entries,model.d1,model.d2);
    model.sparse_structure2 = sparse_structure2; % creating a field
end

data_ts = data.ts; % test (prediction) data
data_ls = data.ls; % training (least-squares) data

% If no initial point was supplied, draw a random rank-p point in polar
% form X = U*B*V' with U, V orthonormal and B the (diagonal) core.
if ~isfield(model,'U') || ~isfield(model,'B') || ~isfield(model,'V'),
    G = randn(d1, model.p);
    H = randn(d2, model.p);
    [Q_U, R_U] = qr(G, 0);
    [Q_V, R_V] = qr(H, 0);
    [qL, S, qR] = svd(R_U*R_V');
    model.U = Q_U * qL;
    model.B = S;
    model.V = Q_V * qR;
end

% Bookkeeping accumulated across the rank-incremental iterations.
infos.costs = [];
infos.newRank = [];
infos.newRank_lambda = [];
infos.iter_time = [];
rank_count = []; curtail_count = []; % Extra guard against rank deficiency
infos.test_error = []; 

p0 = model.p; % starting rank: the rank-1 line-search only runs once p grows

t_begin = tic();
fprintf('*********************\n');
fprintf('lambda = %.5e  \n', model.lambda);
%% Solving the Trace norm minimization for a fixed lambda
while model.p <= pmax, % If p = min(d1,d2), a global min is attained for sure
    if verb,
        fprintf('>> Rank %d <<\n', model.p);
    end
    
    % After the first pass, warm-start the new rank with a line-search along
    % the rank-1 descent direction (restartDir, built from the dominant
    % singular vectors of the dual variable of the previous iteration).
    if (model.p > p0),
        % Perform line-search based on the Rank-1 update direction
        costBefore = functions_matrix_completion_polar('f', model, [], data_ls, params);
        if verb,
            fprintf('>> Line-search with rank-1 update\n');
            fprintf('>> Cost before: %.5e\n', costBefore);
        end
        % Initial step-size guess, scaled by the matrix size over the number
        % of observed entries; sig is the dominant singular value of the
        % previous dual variable candidate.
        %beta = sig * data.d1 * data.d2 / (4*data.ls.nentries); %2*(sig - model.lambda); % a good guess!
        beta = (sig - model.lambda) * data.d1 * data.d2 / (4*data.ls.nentries); %2*(sig - model.lambda); % a good guess!

        % Slope used in the Armijo sufficient-decrease test below.
        %sufficient_decrease = 1e-4 * abs(restartDir.U'*dual_var*restartDir.V);
        %sufficient_decrease = 0.5 * (sig - model.lambda);
        %sufficient_decrease = 0.5 * sig;
        sufficient_decrease = 0.5 * (sig - model.lambda);

        % Backtracking: halve beta until the Armijo condition holds or the
        % iteration budget is spent (test_model from the last trial is kept
        % either way).
        for i = 0 : ls_max_iter,
            % Rank one update of polar factorization
            test_model = svd_rank_1_update(model,beta*restartDir.U,restartDir.V);
            costAfter = functions_matrix_completion_polar('f', test_model, [], data_ls, params);
            % Armijo condition
            armijo = (costAfter - costBefore) <= - beta*sufficient_decrease; % search beta on the least square objective
            if armijo,
                if verb,
                    fprintf('>> Cost after:  %.5e  #extra-linesearch: %i\n', costAfter, i);
                end
                break;
            else
                beta = beta / 2;
            end
        end
        % Curtail rank if necessary (drop numerically negligible directions).
        [U, B, V, curtail] = curtail_rank(test_model);
        model.U = U; model.B = B; model.V = V; model.p = size(model.U, 2);
        if curtail,
            % Track curtailed ranks; landing on the same rank twice in a row
            % means the rank increments are being undone, so stop.
            rank_count = [rank_count; model.p];
            curtail_count = [curtail_count; 1 ];
            if length(curtail_count) > 1 && rank_count(end) == rank_count(end-1), % We have done at least 2 curtailings to arrive at the same rank, time to stop optimization
                fprintf('\n\nQuitting! \n\nCurtailed 2 times to arrive at the same rank %i.\n', rank_count(end-1));
                fprintf('The stopping criterion for the convex program seems too strict.\nTry a more stricter tolerance for the Trust-region algorithm...\n')
                clear rank_count curtail_count
                rank_count = []; curtail_count = [];
                break;
            end
        end
        clear U B V;
    end
    
    % Trust-region algorithm for fixed rank
    [model, infos_algo] = feval(algofun, data_ts, data_ls, model, params);
    
    infos.costs = [infos.costs; infos_algo.costs];
    if params.compute_predictions,
        infos.test_error = [infos.test_error; infos_algo.test_error];
    end
    
    % Dual variable candidate: the residual on the observed entries, stored
    % in place in the sparse skeleton (updateSparse mutates sparse_structure).
    preds = functions_matrix_completion_polar('predict', model, [], data_ls);
    errors = (preds - data_ls.entries);
    updateSparse(sparse_structure, 2 * errors);
    dual_var = sparse_structure;
    % Dominant singular triplet of the dual candidate; lansvd (PROPACK) is
    % used instead of svds for efficiency on large sparse matrices.
   %[u, sig, v] = svds(dual_var, 1, 'L');
    [u, sig, v] = lansvd(dual_var, 1, 'L');
    
    % Duality gap computations
    primal_cost = (errors'*errors) + model.lambda * trace(model.B);
    [duality_gap, rel_duality_gap] = compute_duality_gap(model, data_ls, errors, sig,  primal_cost);
    if verb,
        fprintf('>> sig_max - lambda = %.5e\n', sig - model.lambda);
        fprintf('>> Relative duality gap = %.5e\n', rel_duality_gap);
        fprintf('>> Duality gap = %.5e\n', duality_gap);
    end
    
    % Rebase the inner algorithm's per-iteration timestamps onto this
    % function's global clock before appending them.
    t_new = toc(t_begin);
    delta_t = t_new - infos_algo.iter_time(end);
    infos_algo.iter_time = infos_algo.iter_time + delta_t;
    infos.iter_time = [infos.iter_time; infos_algo.iter_time];
    
    % Stopping criterion for low-rank optimization for fixed lambda parameter:
    % stop as soon as either the absolute or the relative duality gap is
    % below its tolerance. (The commented-out variants below are alternative
    % criteria kept for reference.)
        %     if sig - model.lambda <= smax_tol,
%     if(model.p == 1)
%         delta = inf;
%     else
%         delta = infos.costs(model.p - 1) - infos.costs(model.p);
%     end
%     
%     if(abs(delta) < params.tol)
%         break
%     end
%     fprintf('iter:%d, obj:%.3d(%.3d)\n', model.p, infos.costs(model.p), delta);
        
    if duality_gap <= dg_tol || rel_duality_gap <= dg_vtol,
        %     if duality_gap <= dg_tol,
        %     if rel_duality_gap <= dg_vtol,
        %         fprintf('-------------------------------------------------\n');
        fprintf('OPTIMALITY CERTIFICATE with Relative duality gap %.5e \n',rel_duality_gap);
        fprintf('Duality gap is %.5e \n',duality_gap);
        fprintf('sigma - lambda = %.5e\n',sig - model.lambda);
        fprintf('---------------------\n');
        break;
    end
    % Rank-1 update: grow the rank and remember the descent direction
    % (negative dominant left singular vector, dominant right singular
    % vector) for the warm-start line-search of the next iteration.
    model.p = model.p + 1;
    restartDir.U = -u;
    restartDir.V = v;
    clear u v;
end % END for low-rank optimization for fixed-lambda

end

