function [model, llh, rate, R] = EM_main(nos, range, lambda, tol, param)
% EM_main  Run EM for a mixture-of-Gaussians low-rank factorization,
% restarting from a fresh random initialization whenever the run
% diverges (objective stays NaN).
%
% Input:
%   nos:        N*M, matrix for training
%   range:      scalar; if the difference of 2 sigmas is less than range,
%               combine the two Gaussian components into a single Gaussian
%               (applied via kTuning after each EM step).
%   lambda:     scalar, penalty for rank
%   tol:        scalar, relative log-likelihood tolerance, e.g. 1e-5
%   param:      struct with fields k (initial number of Gaussian
%               components) and r (initial rank); passed to randInitial.
%
% Output:
%   model:      struct holding the number of Gaussian components k, Sigma
%               for each component, component weights, and factors U, V.
%   llh:        scalar, log-likelihood of the final model
%   rate:       relative log-likelihood change at the last iteration
%   R:          responsibility matrix from the last E-step
%-----------------------------------

% err stays NaN until a restart yields a finite objective; each pass of
% this outer loop is one random restart.
err = NaN;
emRound = 1;   % restart counter (renamed: "round" shadows the builtin round())
while isnan(err)

    % Re-read the initial mixture size and rank from param on every restart.
    k = param.k;
    r = param.r;

    numIters = 1;
    model = randInitial(nos, k, r, param);
    fprintf('EM for Gaussian mixture: running %dth round... \n', emRound);
    maxIters = 1000;
    llh = inf;
    % err(1): squared Frobenius reconstruction error of the initialization.
    err = norm((nos - model.U*model.V'), 'fro')^2;

    while numIters < maxIters
        numIters = numIters + 1;
        R = Expectation(nos, model);                   % E-step
        model = Maximization(nos, R, model, lambda);   % M-step
        err(numIters) = calErr(nos, model, R, lambda);
        llh(numIters) = loglh(nos, model, R);
        % Relative log-likelihood change; llh(1) = inf makes the first
        % rate Inf, guaranteeing at least two EM steps are taken.
        rate = (abs(llh(numIters-1) - llh(numIters)))/abs(llh(numIters));
        if rate < tol
            break;
        else
            % Merge Gaussian components whose sigmas differ by less
            % than `range`.
            model = kTuning(model, range);
        end
        % A NaN rate means the likelihood diverged; abandon this round.
        % (The outer loop restarts only if err is still entirely NaN.)
        if isnan(rate)
            break;
        end
    end
    emRound = emRound + 1;
end

if numIters < maxIters
    fprintf('Converged in %d steps.\n',numIters-1);
else
    fprintf('Not converged in %d steps.\n',maxIters);
end

% Report the log-likelihood of the final model under the last E-step's R.
llh = loglh(nos, model, R);