function [F_final, S_final, G_final, nIter_final, objhistory_final] = LPFNMTF_Multi(X, rowK, colK, rowW, colW, options, F, S, G)
% Local Preserving Fast Nonnegative Matrix Tri-Factorization (LPFNMTF)
% Input
%     X: (mFea x nSmp) data matrix
%         mFea  ... number of words (vocabulary size)
%         nSmp  ... number of documents
%     rowK: number of row hidden factors
%     colK: number of column hidden factors
%     rowW: weight matrix of the affinity graph on features
%     colW: weight matrix of the affinity graph on samples
%     options: Structure holding all settings
%         options.rowLamda ... the regularization parameter. [default: 100]
%         options.colLamda ... the regularization parameter. [default: 100]
%     F, S, G: optional initial factors; when F is empty, factors are
%         initialized internally and nRepeat random restarts are run.
%
% You only need to provide the above four inputs.
%
% Output
%     F: nDim * rowK
%     S: rowK * colK
%     G: nSmp * colK
%     obj:
%
% Problem:
%     min ||X - F S G'||^2 + rowLamda F' L_f F + colLamda G' L_s G
%     s.t. F \in {0,1}^{d,m}; G \in {0,1}^{n, c};
%
% Approximation:
%     min ||X - F S G'||^2 + rowLamda ||F - Bf Qf||^2 + colLamda ||G - B_s Q_s||^2
%     s.t. F \in {0,1}^{d,m}; G \in {0,1}^{n, c}; Qf' Qf = I; Q_s' Q_s = I;
%
%
% Note
% (1). F, G are cluster indicator matrices, Not relaxed other matrices
% (2). S is not constrained to be Non-negative, which means this algorithms
% can be applied when X has negative elements
% (3). The optimization of F, G are decoupled, which makes it is easy to
% optimization.
%
% [1]. Fast Nonnegative Matrix Tri-Factorization for Large-Scale Data
% Co-Clustering, Hua Wang, IJCAI, 2011
%

differror = options.error;
maxIter = options.maxIter;
nRepeat = options.nRepeat;
minIter = options.minIter - 1;
if ~isempty(maxIter) && maxIter < minIter
    minIter = maxIter;
end
meanFitRatio = options.meanFitRatio;

rowLamda = options.rowLamda;
colLamda = options.colLamda;

[nDim, nSmp] = size(X);

selectInit = 1;
if isempty(F)
    F = InitFactors(X, rowK, options);
    S = abs(rand(rowK, colK));
    G = InitFactors(X', colK, options);
else
    nRepeat = 1;
end

% Default the graph factors to empty so they can always be passed to
% CalculateObj (which only touches them when the matching lamda > 0).
% Without this, any call path with rowLamda <= 0 or colLamda <= 0 hits an
% undefined-variable error.
Bf = []; Qf = [];
Bg = []; Qg = [];

if rowLamda > 0
    Bf = ApproximateA(rowW, rowK);
    Hf = Bf' * F;
    [Uf, ~, Vf] = svd(Hf); % rowK * rowK
    Qf = Uf * Vf';
    clear Hf Uf Vf;
end

if colLamda > 0
    Bg = ApproximateA(colW, colK);
    Hg = Bg' * G;
    [Ug, ~, Vg] = svd(Hg);
    Qg = Ug * Vg';
    clear Hg Ug Vg;
end

if nRepeat == 1
    selectInit = 0;
    minIter = 0;
    if isempty(maxIter)
        objhistory = CalculateObj(X, F, S, G, Bf, Bg, Qf, Qg, rowLamda, colLamda);
        meanFit = objhistory*10;
    else
        if isfield(options,'Converge') && options.Converge
            objhistory = CalculateObj(X, F, S, G, Bf, Bg, Qf, Qg, rowLamda, colLamda);
        end
    end
else
    if isfield(options,'Converge') && options.Converge
        error('Not implemented!');
    end
end

tryNo = 0;
nIter = 0;
while tryNo < nRepeat
    tryNo = tryNo+1;
    maxErr = 1;
    while(maxErr > differror)
        % **********************************************************
        % Update S when F, G are fixed
        % **********************************************************
        
        % Closed form: S = (F'F)^{-1} F' X G (G'G)^{-1}; since F, G are
        % indicator matrices, F'F and G'G are diagonal cluster sizes.
        S = F' * X * G;
        S = diag(1./(sum(F) + eps)) * S * diag(1./(sum(G) + eps));
        
        % **********************************************************
        % Solve min_Qf ||F - Bf * Qf||^2; subject to Qf' * Qf = I
        % **********************************************************
        
        if rowLamda > 0
            % Orthogonal Procrustes solution: Qf = Uf*Vf' from svd(Bf'*F).
            Hf = Bf' * F;
            [Uf, ~, Vf] = svd(Hf); % rowK * rowK
            Qf = Uf * Vf';
            clear Hf Uf Vf;
        end
        
        % **********************************************************
        % Solve min_Qg ||G - Bg * Qg||^2; subject to Qg' * Qg = I
        % **********************************************************
        
        if colLamda > 0
            Hg = Bg' * G;
            [Ug, ~, Vg] = svd(Hg);
            Qg = Ug * Vg';
            clear Hg Ug Vg;
        end
        
        % **********************************************************
        % Update F when S, G, Qf are fixed
        % Assign each feature to the cluster with
        % arg min_c ||x - center_c||^2 + rowLamda || lapPrior_{i,c} ||^2
        % **********************************************************
        
        feaCenter = S * G'; % rowK * nSmp
        Dist = bsxfun(@plus, sum(X.^2,2), sum(feaCenter.^2,2)') - 2 * X * feaCenter;
        
        if rowLamda > 0
            % Row-wise, ||F - Bf*Qf||^2 differs across clusters only through
            % the cross term -2*(Bf*Qf)_{i,c}; the squared norms are constant
            % per row and drop out of the argmin.
            feaLapPrior = Bf * Qf; % nDim * rowK
            Dist = Dist - 2 * rowLamda * feaLapPrior;
        end
        
        % One-hot assignment of each feature to its minimizing cluster.
        [~, f] = min(Dist, [], 2);
        F = sparse([1:size(F,1)]', f, ones(size(F,1),1), size(F,1), size(F,2));
        F = full(F);
        
        % **********************************************************
        % Update G when F, S, Qg are fixed;
        % Assign each sample to the cluster with
        % minimum ||x - Ug_c||^2 + colLamda || lapPrior_{i,c} ||^2
        % **********************************************************
        smpCenter = F * S; % nDim * colK
        Dist = bsxfun(@plus, sum(X.^2)', sum(smpCenter.^2)) - 2 * X' * smpCenter;
        
        if colLamda > 0
            smpLapPrior = Bg * Qg; % nSmp * colK
            % FIX: the sample-side update must be weighted by colLamda
            % (was rowLamda, a copy-paste from the feature-side update).
            Dist = Dist - 2 * colLamda * smpLapPrior;
        end
        
        [~, g] = min(Dist, [], 2);
        G = sparse([1:size(G,1)]', g, ones(size(G,1),1), size(G,1), size(G,2));
        G = full(G);
        
        nIter = nIter + 1;
        if nIter > minIter
            if selectInit
                % Restart-selection phase: one objective evaluation per try.
                objhistory = CalculateObj(X, F, S, G, Bf, Bg, Qf, Qg, rowLamda, colLamda);
                maxErr = 0;
            else
                if isempty(maxIter)
                    % Converge on the relative drop of a running mean fit.
                    newobj = CalculateObj(X, F, S, G, Bf, Bg, Qf, Qg, rowLamda, colLamda);
                    objhistory = [objhistory; newobj]; %#ok<AGROW>
                    meanFit = meanFitRatio*meanFit + (1-meanFitRatio)*newobj;
                    maxErr = (meanFit-newobj)/meanFit;
                else
                    if isfield(options,'Converge') && options.Converge
                        newobj = CalculateObj(X, F, S, G, Bf, Bg, Qf, Qg, rowLamda, colLamda);
                        objhistory = [objhistory; newobj]; %#ok<AGROW>
                    end
                    maxErr = 1;
                    if nIter >= maxIter
                        maxErr = 0;
                        if isfield(options,'Converge') && options.Converge
                        else
                            objhistory = 0;
                        end
                    end
                end
            end
        end
    end
    
    % Keep the best-so-far factorization across random restarts.
    if tryNo == 1
        F_final = F;
        S_final = S;
        G_final = G;
        nIter_final = nIter;
        objhistory_final = objhistory;
    else
        if objhistory(end) < objhistory_final(end)
            F_final = F;
            S_final = S;
            G_final = G;
            nIter_final = nIter;
            objhistory_final = objhistory;
        end
    end
    
    if selectInit
        if tryNo < nRepeat
            %re-start
            F = InitFactors(X, rowK, options);
            S = abs(rand(rowK, colK));
            G = InitFactors(X', colK, options);
            nIter = 0;
        else
            % Last restart selected: switch to the full optimization run
            % from the best initialization found so far.
            tryNo = tryNo - 1;
            nIter = minIter+1;
            selectInit = 0;
            F_final = F;
            S_final = S;
            G_final = G;
            objhistory = objhistory_final;
            meanFit = objhistory*10;
        end
    end
end

end

function U = InitFactors(X, K, options)
% **********************************************************
% Initialize a cluster indicator matrix U (n x K) for the rows of X.
% Uses litekmeans when options.InitKm is set and litekmeans.m is on the
% path; otherwise assigns each row to a uniformly random cluster.
% **********************************************************
n = size(X, 1);

if isfield(options, 'InitKm') && options.InitKm && ~isempty(which('litekmeans.m'))
    r = litekmeans(X, K);
else
    r = randi(K, [n, 1]);
end
% One-hot encode the labels: U(i, r(i)) = 1.
U = full(sparse((1:n)', r, ones(n, 1), n, K));
end

function B = ApproximateA(A, K)
% **********************************************************
% Approximation of tr( F' (I - A) F) to ||F - Bf * Qf||^2
% **********************************************************
% min_C tr(C' (I - A) C) is approximated as
% min_{C, Q'Q = I} ||C - B Q||^2
% A = P S P'=> S_k = S_K => B = P * S_K^.5
% A is first symmetrically normalized: A <- D^{-1/2} A D^{-1/2}.
nSmp = size(A,1);
DCol = full(sum(A,2));
% Guard isolated vertices: a zero degree would put Inf on the diagonal of
% D^{-1/2} and propagate NaN through the normalization. Leave those rows
% and columns as zero instead.
DInvHalf = zeros(nSmp, 1);
nzDeg = DCol > 0;
DInvHalf(nzDeg) = DCol(nzDeg).^-.5;
D_mhalf = spdiags(DInvHalf,0,nSmp,nSmp);
A = D_mhalf * A * D_mhalf;
A = (A + A')/2; % enforce exact symmetry against round-off
% full(): svd does not accept a sparse matrix (affinity graphs are
% commonly stored sparse).
[P, S] = svd(full(A));
S_K_half = diag(S(1:K, 1:K)).^.5;
B = P(:, 1:K) * diag(S_K_half);
end

function obj = CalculateObj(X, F, S, G, Bf, Bg, Qf, Qg, rowLamda, colLamda)
% Objective value of the approximated LPFNMTF problem:
%   ||X - F S G'||_F^2 + rowLamda ||F - Bf Qf||_F^2 + colLamda ||G - Bg Qg||_F^2
% The reconstruction term is evaluated in column blocks when the dense
% residual would not fit in MAXARRAY doubles at once.
MAXARRAY = 500*1024*1024/8; % 500M. You can modify this number based on your machine's computational power.

nSmp = size(X,2);
nElem = numel(X);
nBlock = ceil(nElem/MAXARRAY);

if nElem < MAXARRAY
    % Small enough: form the full residual in one shot.
    resid = X - F * S * G';
    obj_NMF = sum(resid(:).^2);
else
    % Accumulate the squared residual over column slices of X.
    obj_NMF = 0;
    FS = F * S;
    blockLen = ceil(nSmp/nBlock);
    for iBlock = 1:nBlock
        cols = (iBlock-1)*blockLen+1 : min(iBlock*blockLen, nSmp);
        resid = X(:,cols) - FS * G(cols,:)';
        obj_NMF = obj_NMF + sum(resid(:).^2);
    end
end

% Graph-regularization terms; skipped (and Bf/Qf resp. Bg/Qg never
% touched) when the corresponding weight is non-positive.
obj_Lf = 0;
if rowLamda > 0
    dF = F - Bf * Qf;
    obj_Lf = rowLamda * sum(dF(:).^2);
end

obj_Lg = 0;
if colLamda > 0
    dG = G - Bg * Qg;
    obj_Lg = colLamda * sum(dG(:).^2);
end

obj = obj_NMF + obj_Lf + obj_Lg;
end