function [model, infos] = compute_path(algofun, data, model, params)
% [model,  infos] = compute_path(algofun, data, model, params)
% 
% Computes the regularization path using a predictor-corrector approach.
% See the tech report http://arxiv.org/abs/1112.2318.
% Once lambda (the regularization parameter) is fixed, the optimization is
% done using low-rank optimization. All iterates are low-rank. Finally, an
% optimality certificate is provided by computing the duality gap.
%
% Parameters:
%   algofun                    function handle on the trust-region algorithm
%             
%
%   data.ls                    structure array about the given entries
%   data.ts                    structure array for testing (if not empty)
%   (model.U,model.B,model.V)  initial point
%   lambda_array               an array of regularization parameters
%   params                     structure array containing algorithm parameters
%                              (see default_params for details)
%
% Output:
%   model        structure array of low-rank polar factors
%   infos        structure array with additional information
%
%
% Authors:
% Bamdev Mishra <b.mishra@ulg.ac.be>

    
  % Unpack algorithm parameters.
  verb = params.verb;               % verbosity flag for inner-loop printing
  ls_max_iter = params.ls_max_iter; % max backtracking steps in the rank-1 line-search
  smax_tol = params.smax_tol;       % NOTE(review): only used by the commented-out criterion below — verify it is still needed
  vp_tol = params.vp_tol;           % NOTE(review): read but never used in this function — verify
  dg_tol = params.dg_tol;           % absolute duality-gap tolerance (stopping criterion)
  dg_vtol = params.dg_vtol;         % relative duality-gap tolerance (stopping criterion)
  
  d1 = model.d1;                     % number of rows of the matrix being completed
  d2 = model.d2;                     % number of columns
  pmax = model.pmax;                 % maximum rank allowed along the path
  lambda_array = model.lambda_array; % sequence of regularization parameters to sweep
  
  
  % Sparse skeleton on the observed entry pattern. updateSparse (called
  % later) rewrites its nonzero values in place so it doubles as scratch
  % storage for the dual variable — presumably a mex helper; TODO confirm.
  if isfield(model, 'sparse_structure'),
      sparse_structure = model.sparse_structure;
  else
      warning('WarnTests:convertTest',...
          'You did not supply the "sparse_structure" field in "model".\nCreating a sparse skeleton, "model.sparse_structure" for efficiency.\n');
      sparse_structure = sparse(data.ls.rows,data.ls.cols,data.ls.entries,model.d1,model.d2);
      model.sparse_structure = sparse_structure;  % creating a field
  end
  
  % Second skeleton, stored on the model only; it is not referenced again
  % inside this function (presumably consumed by algofun or other helpers).
  if ~isfield(model, 'sparse_structure2'),
      warning('WarnTests:convertTest',...
          'You did not supply the "sparse_structure2" field in "model".\nCreating a sparse skeleton, "model.sparse_structure2" for efficiency.\n');
      sparse_structure2 = sparse(data.ls.rows,data.ls.cols,data.ls.entries,model.d1,model.d2);
      model.sparse_structure2 = sparse_structure2; % creating a field
  end
  
  data_ts = data.ts; % test entries (may be empty)
  data_ls = data.ls; % training (observed) entries
  
  % Random initial point when the polar factors (U, B, V) are not supplied:
  % U and V get orthonormal columns and B the singular values of R_U*R_V',
  % so U*B*V' is a valid rank-p polar factorization.
  if ~isfield(model,'U') || ~isfield(model,'B') || ~isfield(model,'V'),
      G = randn(d1, model.p);
      H = randn(d2, model.p);
      [Q_U R_U] = qr(G, 0);
      [Q_V R_V] = qr(H, 0);
      [qL S qR] = svd(R_U*R_V');
      model.U = Q_U * qL;
      model.B = S;
      model.V = Q_V * qR;
  end

  % Bookkeeping collected along the regularization path.
  infos.costs = [];                   % primal cost at every inner iteration
  infos.newRank = [];                 % ranks at which the optimal rank increased
  infos.newRank_lambda = [];          % lambda values where the rank increased
  infos.Regpath_sig_lambda = [];      % sig_max - lambda at each path optimum
  infos.Regpath_duality_gap = [];     % absolute duality gap at each path optimum
  infos.Regpath_rel_duality_gap = []; % relative duality gap at each path optimum
  infos.Regpath_rank = [];            % optimal rank for each lambda
  infos.Regpath_lambda = [];          % lambda values visited
  infos.Regpath_sol = [];             % optimal cost for each lambda
  infos.Regpath_warm_preds = [];      % cost of warm-started point, before prediction
  infos.Regpath_predictor_preds = []; % cost after the predictor step
  infos.Regpath_preds_all = [];       % test error at each path optimum (if requested)
  infos.iter_time = [];               % cumulative wall-clock time per inner iteration
  infos.Regpath_iter_time = [];       % NOTE(review): initialized but never appended below — verify
  curtail_count = []; rank_count = []; % Extra guard against rank deficiency
  infos.test_error = []; % Compute predictions at each iteration
 
 t_begin = tic();
 t_begin_reg_path = tic();
 
 %% Loop over lambda 
 for k = 1 : length(lambda_array),
  model.lambda = lambda_array(k);
  token = 1; % A token to start optimization unless already initial point is optimal
  p0 = model.p; % rank of the warm-started point for this lambda
  fprintf('*********************\n');
  fprintf('lambda = %.5e \n',model.lambda);
  fprintf('Rank = %i    \n', model.p);
%   fprintf('*********************\n');

  % Cost of the warm-started point (previous lambda's solution):
  % squared error on observed entries + lambda * trace(B) (trace-norm term).
  preds = functions_matrix_completion_polar('predict', model, [], data_ls);
  errors = (preds - data_ls.entries);
  warm_cost = (errors'*errors) + model.lambda*trace(model.B);
  infos.Regpath_warm_preds = [infos.Regpath_warm_preds; warm_cost];
  %% Predictor (-corrector) step
  % From the third lambda onward, extrapolate a new starting point from the
  % two previous solutions X1 and X2 (maintained at the bottom of this loop).
  % The step is scaled by the ratio of consecutive lambda decrements.
  if k >= 3,
      steSize_prediction = (model.lambda - lambda_array(k - 1)) /  (lambda_array(k - 1) - lambda_array(k - 2) );
      X1.sparse_structure = sparse_structure; X1.lambda = model.lambda;
      X2.sparse_structure = sparse_structure; X2.lambda = model.lambda;
      Prediction_before = warm_cost;
      [U B V] = predictor(X1, X2, data_ls, steSize_prediction, model.lambda, Prediction_before);
      model.U = U;
      model.B = B;
      model.V = V;
  end
  % Cost after the predictor step (equals warm_cost when k < 3).
  preds = functions_matrix_completion_polar('predict', model, [], data_ls);
  errors = (preds - data_ls.entries);
  cost_predicted = (errors'*errors) + model.lambda*trace(model.B);
  infos.Regpath_predictor_preds = [infos.Regpath_predictor_preds; cost_predicted];

  
  %% Test the prediction obtained,
  % Test if the given point is close to the optimal solution by
  % observing the duality gap
  
  % Dual variable: residual 2*errors scattered back onto the sparse
  % skeleton (in-place value update; the skeleton is reused as dual_var).
  updateSparse(sparse_structure, 2 * errors);
  dual_var = sparse_structure;
  %sig  = svds(dual_var, 1, 'L');
  [~, sig, ~] = lansvd(dual_var, 1, 'L'); % largest singular value of the dual variable
  
  % Duality gap computations
  primal_cost = (errors'*errors) + model.lambda * trace(model.B);
  [duality_gap, rel_duality_gap] = compute_duality_gap(model, data_ls, errors, sig,  primal_cost);
  
  % Stopping criterion: the predicted point is already optimal for this
  % lambda, so skip the inner optimization loop entirely (token = 0).
%   if  sig - model.lambda <= smax_tol,
if duality_gap <= dg_tol || rel_duality_gap <= dg_vtol,
    %   if rel_duality_gap <= dg_vtol,
    token = 0; % point is optimal
    infos.costs = [infos.costs; primal_cost];
    
    % If interested in computing recovery
    if params.compute_predictions,
        preds_test = partXY((model.U*model.B)', model.V', data_ts.rows, data_ts.cols, data_ts.nentries)';
        errors_test = preds_test - data_ts.entries;
        cost_test = (errors_test'*errors_test);
        infos.test_error = [infos.test_error ; cost_test];
    end
    
    infos.iter_time = [infos.iter_time; toc(t_begin_reg_path)];
    fprintf('[0000] cost = %.5e\n', primal_cost);
    fprintf('OPTIMALITY CERTIFICATE with Relative duality gap %.5e \n',rel_duality_gap);
    fprintf('Duality gap is %.5e \n',duality_gap);
    fprintf('sigma - lambda = %.5e\n',sig - model.lambda);
    fprintf('---------------------\n');
end
  
  clear dual_var;
 %% Solving the Trace norm minimization for a fixed lambda 
 % Alternate fixed-rank trust-region solves with rank-1 updates until the
 % duality gap certifies optimality or the rank cap pmax is reached.
 while (model.p <= pmax && token == 1), % If p = min(d1,d2), a global min is attained for sure
    if verb,
        fprintf('>> Rank %d <<\n', model.p);
    end
    
    % After a rank increment, line-search along the rank-1 descent
    % direction (restartDir, set at the end of the previous iteration).
    if (model.p > p0),
        % Perform line-search based on the Rank-1 update direction         
        costBefore = functions_matrix_completion_polar('f', model, [], data_ls, params);        
        if verb,
          fprintf('>> Line-search with rank-1 update\n');
          fprintf('>> Cost before: %.5e\n', costBefore);
        end
        % Initial stepsize guess.
        % NOTE(review): reads data.d1/data.d2 whereas dimensions are taken
        % from model.d1/model.d2 elsewhere — verify data carries these fields.
        beta = (sig - model.lambda) * data.d1 * data.d2 / (4*data.ls.nentries);%  sig - model.lambda; % a good guess! 
        %sufficient_decrease = 1e-4 * abs(restartDir.U'*dual_var*restartDir.V); 
        sufficient_decrease = 0.5 * (sig - model.lambda);

        % Backtracking (halving) line-search with an Armijo condition.
        % NOTE(review): if no step satisfies Armijo within ls_max_iter
        % halvings, the last test_model is still accepted (best-effort).
        for i = 0 : ls_max_iter,
            % Rank one update of polar factorization
            test_model = svd_rank_1_update(model,beta*restartDir.U,restartDir.V);
            costAfter = functions_matrix_completion_polar('f', test_model, [], data_ls, params);
            if verb,
                fprintf('>> Cost after:  %.5e  #extra-linesearch: %i\n', costAfter, i);
            end
            % Armijo condition
            armijo = (costAfter - costBefore) <= - beta*sufficient_decrease;
            if armijo,
                break;
            else
                beta = beta / 2;
            end
        end
%         model.U = test_model.U ; model.B = test_model.B ; model.V = test_model.V;
        % Curtail rank if necessary (drop near-zero singular values).
        [U B V curtail] = curtail_rank(test_model);
        model.U = U; model.B = B; model.V = V; model.p = size(model.U, 2);
        if curtail,
            rank_count = [rank_count; model.p];
            curtail_count = [curtail_count; 1 ];
            % Two consecutive curtailments to the same rank means we are
            % cycling: give up on this lambda and move on.
            if length(curtail_count) > 1 && rank_count(end) == rank_count(end-1), % We have done at least 2 curtailings to arrive at the same rank, time to stop optimization
                fprintf('\n\nQuitting! \n\nCurtailed 2 times to arrive at the same rank %i.\n', rank_count(end-1));
                fprintf('The stopping criterion for the convex program seems too strict.\nTry a more stricter tolerance for the Trust-region algorithm...\n')
                clear rank_count curtail_count
                rank_count = []; curtail_count = [];
                break;
            end
        end
        clear U B V;
    end
    
    % Trust-region algorithm for fixed rank
    [model, infos_algo] = feval(algofun, data_ts, data_ls, model, params);
    

    infos.costs = [infos.costs; infos_algo.costs]; 
    if params.compute_predictions,
        infos.test_error = [infos.test_error; infos_algo.test_error];
    end
    
    % Dual variable candidate: residual scattered onto the sparse skeleton,
    % plus its dominant singular triplet (u, sig, v) for the certificate
    % and the next rank-1 update direction.
    preds = functions_matrix_completion_polar('predict', model, [], data_ls);
    errors = (preds - data_ls.entries);
    updateSparse(sparse_structure, 2 * errors);
    dual_var = sparse_structure;
   %[u, sig, v] = svds(dual_var, 1, 'L');
    [u, sig, v] = lansvd(dual_var, 1, 'L');

    
    % Duality gap computations
    primal_cost = (errors'*errors) + model.lambda * trace(model.B);
    [duality_gap rel_duality_gap] = compute_duality_gap(model, data_ls, errors, sig,  primal_cost);  
    if verb,
      fprintf('>> sig_max - lambda = %.5e\n', sig - model.lambda);
      fprintf('>> Relative duality gap = %.5e\n', rel_duality_gap);
      fprintf('>> Duality gap = %.5e\n', duality_gap);
    end
    
    % Shift the inner solver's per-iteration timestamps so they continue
    % the global clock started at t_begin.
    t_new = toc(t_begin);
    delta_t = t_new - infos_algo.iter_time(end);
    infos_algo.iter_time = infos_algo.iter_time + delta_t;
    infos.iter_time = [infos.iter_time; infos_algo.iter_time];
    % Stopping criterion for low-rank optimization for fixed lambda parameter
%     if sig - model.lambda <= smax_tol,  
    if duality_gap <= dg_tol || rel_duality_gap <= dg_vtol,
%     if duality_gap <= dg_tol, 
%     if rel_duality_gap <= dg_vtol,
%         fprintf('-------------------------------------------------\n');  
        fprintf('OPTIMALITY CERTIFICATE with Relative duality gap %.5e \n',rel_duality_gap);
        fprintf('Duality gap is %.5e \n',duality_gap);
        fprintf('sigma - lambda = %.5e\n',sig - model.lambda);
        fprintf('---------------------\n');   
      break;
    end

    % Rank-1 update: the dominant singular vectors of the dual variable
    % give the steepest rank-1 descent direction for the next iteration.
    model.p = model.p + 1;
    restartDir.U = -u;
    restartDir.V = v;
    clear u v;
  end % END for low-rank optimization for fixed-lambda
  curtail_count = []; rank_count = [];

  %%
  % Collect the information obtained  
  infos.Regpath_duality_gap = [infos.Regpath_duality_gap; duality_gap];
  infos.Regpath_rel_duality_gap = [infos.Regpath_rel_duality_gap; rel_duality_gap];
  infos.Regpath_sig_lambda = [infos.Regpath_sig_lambda; sig - model.lambda];
  infos.Regpath_rank = [infos.Regpath_rank; model.p];
  infos.Regpath_lambda = [infos.Regpath_lambda; model.lambda];
  opt_solution = functions_matrix_completion_polar('f', model, [], data_ls, params);
  infos.Regpath_sol = [infos.Regpath_sol ; opt_solution];  
  % Record lambda values at which the optimal rank strictly increased.
  if (k >1) && (infos.Regpath_rank(end) > infos.Regpath_rank(end-1)),
      infos.newRank = [infos.newRank; infos.Regpath_rank(end)];
      infos.newRank_lambda = [infos.newRank_lambda; model.lambda ];      
  end
  
  % Data referencing: keep the two most recent solutions as X1 (older) and
  % X2 (newer) for the predictor step of the next lambda.
  if k == 1
      X1.U = model.U;
      X1.V = model.V;
      X1.B = model.B;
  elseif k == 2;
      X2.U = model.U;
      X2.V = model.V;
      X2.B = model.B;
  elseif k >= 3
      X1.U = X2.U;
      X1.B = X2.B;
      X1.V = X2.V;
      X2.U = model.U;
      X2.V = model.V;
      X2.B = model.B;
  end;
  
  %% If interested in computing recovery along the regularization path
  if params.compute_predictions,
      preds_test = partXY((model.U*model.B)', model.V', data_ts.rows, data_ts.cols, data_ts.nentries)';
      errors_test = preds_test - data_ts.entries;
      cost_test = (errors_test'*errors_test);
      infos.Regpath_preds_all =[infos.Regpath_preds_all; cost_test];
  end
  
 end % Lambda loops ends
 
end

