% This is a test file that solves
% a problem of the type "minimize f(X) + \lambda ||X||_*" for a grid of
% \lambda values  using a first-order predictor-corrector approach.
%
% Refer to "Low-rank optimization with trace norm penalty",
% technical report (arXiv:1112.2318), 2011, for further details.
% Authors:
% Bamdev Mishra and Gilles Meyer
% {b.mishra, g.meyer}@ulg.ac.be

% Reset state: clearvars (not "clear all", which also purges compiled
% functions/mex from memory and deletes breakpoints) is sufficient here.
clearvars; close all; clc;


%% Setup
% Problem dimensions and regularization for the low-rank matrix
% completion instance.
d1 = 500;             % number of rows
d2 = 500;             % number of columns
r = 5;                % rank of the ground-truth matrix
over_sampling = 4.2;  % oversampling factor w.r.t. the degrees of freedom
lambda = 1;           % trace-norm regularization weight

% Options
verbosity = true;            % show solver output
compute_predictions = true;  % compute predictions each iteration if asked

% Fraction of observed entries: OS * dof / (d1*d2), where
% dof = (d1 + d2 - r)*r is the degrees of freedom of a rank-r matrix.
dof = (d1 + d2 - r) * r;
f = over_sampling * dof / (d1 * d2);
% BUGFIX: the original used %i for over_sampling, but MATLAB prints
% non-integer values under %d/%i in exponential form (4.200000e+00);
% %g prints 4.2 as intended.
fprintf('Rank %i matrix of size %i times %i and OS = %g\n', r, d1, d2, over_sampling);

%% Optimization algorithm is the trust-region algorithm
% Handle to the project-defined trust-region solver; passed to
% low_rank_optimization at the bottom of this script.
optim_algo = @tr_matrix_completion;




%% Parameters
% Solver tolerances and iteration limits. Exact semantics of each
% tolerance live in the solver / default_params -- the trailing notes
% below are reviewer guesses to be confirmed against those sources.
params.tol = 1e-12;      % NOTE(review): presumably cost-decrease tolerance -- confirm
params.vtol = 1e-12;     % NOTE(review): presumably a validation/value tolerance -- confirm
params.grad_tol = 1e-5;  % NOTE(review): presumably gradient-norm tolerance -- confirm
params.dg_tol = 1e-3;
params.dg_vtol = 1e-5;
params.smax_tol = 1e-3;
params.max_iter = 10000;    % outer iteration cap
params.max_iter_tr = 50;    % trust-region iteration cap
params.verb = verbosity; % Show output
params.compute_predictions = compute_predictions; % Compute predictions each iteration if asked

% Fill any unset fields with library defaults.
params = default_params(params);


%% Target model
n  = d1 * d2;  % total number of matrix entries (not referenced later in this script)

%% Generating random data
% NOTE(review): generate_randn_data presumably returns ground-truth
% factors data.Gs/data.Hs and a training set data.ls (rows, cols,
% entries, nentries) with observation fraction f -- confirm in its source.
data = generate_randn_data(d1, d2, r, f);
 
% Testing data: same number of entries as the training set, at
% uniformly random positions (sampled with replacement, so duplicate
% (row, col) pairs are possible).
data.ts.nentries = data.ls.nentries;
data.ts.rows = randi(d1, data.ts.nentries, 1);
data.ts.cols = randi(d2, data.ts.nentries, 1);
% NOTE(review): partXY presumably evaluates entries of Gs*Hs' at the
% given index pairs -- confirm against partXY's documentation.
data.ts.entries = partXY(data.Gs', data.Hs',data.ts.rows,data.ts.cols,data.ts.nentries)';




%% Creating a sparse structure
% Sparse d1-by-d2 matrices carrying the observed training entries.
sparse_structure = sparse(data.ls.rows, data.ls.cols, data.ls.entries, d1, d2);
% Same sparsity pattern with every entry doubled. Equivalent to
% sparse(rows, cols, 2*entries, d1, d2): sparse() sums values at
% repeated subscripts, and scaling by 2 commutes with that summation.
sparse_structure2 = 2 * sparse_structure;



%% Setup initial model
model.d1 = d1;
model.d2 = d2;
model.p = 1; 
model.pmax = min(d1, d2);
model.sparse_structure = sparse_structure;
model.sparse_structure2 = sparse_structure2;
model.lambda = lambda;



%% Functions
fun_set=@functions_nsym_polar_geometry; % File that specifies geometry of the search space UBV^T
fun_obj=@functions_matrix_completion_polar; % File that computes cost function, gradient and directional derivative of the gradient for the matrix completion problem
% NOTE(review): fun_set/fun_obj are not passed explicitly to
% low_rank_optimization below -- presumably resolved from the base
% workspace or by convention inside the solver; confirm.


%% Random Initialization
% Draw random rank-p factors, orthonormalize them with thin QR, and
% rotate with the SVD of the small p-by-p core so that the initial
% iterate is in polar form U * B * V' with orthonormal U, V.
Ginit = randn(model.d1, model.p);
Hinit = randn(model.d2, model.p);
[Qu, Ru] = qr(Ginit, 0); % economy-size QR
[Qv, Rv] = qr(Hinit, 0);
[Pu, Sigma, Pv] = svd(Ru * Rv');

model.U = Qu * Pu;    % left factor with orthonormal columns
model.V = Qv * Pv;    % right factor with orthonormal columns
model.B = Sigma;      % p-by-p core (diagonal, nonnegative)

%% Run algorithm
% Solve the regularized problem with the chosen solver; tic/toc reports
% total wall-clock time. infos presumably carries per-iteration
% diagnostics -- confirm in low_rank_optimization.
tic;
[model, infos] = low_rank_optimization(optim_algo, data, model, params);
toc;






