function R = kneu_train_logreg(TRAIN,LABEL,varargin)

% Uses logistic regression with regularization to predict your regressors
%
% R = KNEU_TRAIN_LOGREG(TRAIN,LABEL,...)
%
% Logistic regression, but penalises small weights (like weight
% regularization in backprop), so it's doing an implicit feature
% selection.
%
% The only parameter is the penalty parameter (how much to
% penalize low weights), which scales with the number of
% features you have, so it could easily get high (e.g. 10^4
% for lots of features) - see PENALTY
%
% PENALTY (required, default = NaN). You have to specify a PENALTY, or
% this function will fail fatally. I usually set this to around 50 for
% 1000 voxels. The more voxels you have, the higher the penalty.
%
% MAXROUNDS (optional, default = 5000) The maximum number of
% iterations used by the iteratively-reweighted least squares
% (IRLS) algorithm. Typically no more than 8 are required.
%
% TOL (optional, default = 1e-4) The stopping criterion of the IRLS
% algorithm: when the decrease in loglikelihood is below this
% proportion, the algorithm returns.
%
% License:
%=====================================================================
%
% This is part of the Princeton MVPA toolbox, released under
% the GPL. See http://www.csbmb.princeton.edu/mvpa for more
% information.
% 
% The Princeton MVPA toolbox is available free and
% unsupported to those who might find it useful. We do not
% take any responsibility whatsoever for any problems that
% you have related to the use of the MVPA toolbox.
%
% ======================================================================
% modified by CR, Dept. of Neurology, Magdeburg

args = args2Param(varargin);

% The help text promises a fatal failure when no PENALTY is given;
% enforce that explicitly here rather than letting NaN propagate
% silently through the IRLS solver and produce NaN betas.
if isnan(args.penalty)
  error('kneu_train_logreg: you must specify a PENALTY argument');
end

if args.scale_penalty
  % Interpret PENALTY as a per-feature amount.
  args.penalty = size(TRAIN,2) * args.penalty;
end

nFeat = size(TRAIN,2);

R.class_args = args;
R.constant = args.constant;

if args.constant
  % Prepend a column of ones so the first beta acts as an intercept.
  TRAIN = [ones(size(TRAIN,1),1), TRAIN];
  R.logreg.betas = NaN(nFeat+1, 1);
else
  R.logreg.betas = NaN(nFeat, 1);  
end

% Do logreg regression on the training data:
% (logistic regression but with added weight parameters)
%

lambda = args.penalty;

R.classes = unique(LABEL);
nConds  = length(R.classes);

% One-vs-rest scheme: loop over the conditions, running penalized
% logistic regression separately on each, and then concatenate the
% resulting weight vectors column-wise afterwards.
for c=1:nConds
  curL = LABEL==R.classes(c);
  out = kneu_logRegFun(curL', TRAIN', lambda, args.tol, args.maxrounds);
  R.logreg.betas(:,c) = out.weights';
  R.logreg.trainError(c,:) = out.classError;
  R.logreg.rounds(c) = out.rounds;
  R.logreg.ll{c} = out.ll;  
end



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% ------------------------------------------------------------
function param = args2Param(args)
% Convert a cell array of name/value pairs (the caller's varargin) into
% a parameter struct, filling in defaults for anything not supplied.
%
% Recognized names (case-insensitive):
%   'param' - a struct whose fields are copied wholesale into PARAM
%   any default field below ('tol', 'maxrounds', 'penalty',
%   'use_matlab', 'constant', 'scale_penalty') - sets that field
%
% Fix: the original parser only recognized 'param' and 'penalty', so
% the documented TOL / MAXROUNDS options were silently ignored when
% passed as name/value pairs; all default field names are now accepted.
% Unrecognized names are still silently ignored, preserving the
% original lenient behavior. Non-string names raise an error.

% set default values
param.tol = 1e-4;
param.maxrounds = 5000;
param.penalty = NaN;
param.use_matlab = false;
param.constant = false;
param.scale_penalty = false;

% Snapshot of the settable field names, used for case-insensitive lookup.
knownNames = fieldnames(param);

k = 1;
while k < length(args),
    if ~ischar(args{k}),
        error('argument name must be a string');
    end
    if strcmpi('param', args{k}),
        % Copy every field of the supplied struct into param.
        % Dynamic field names replace the original eval() call, which
        % was slower and unsafe for arbitrary field-name strings.
        tmp = args{k+1};
        fNames = fieldnames(tmp);
        for p = 1:length(fNames),
            param.(fNames{p}) = tmp.(fNames{p});
        end
    else
        % Accept any of the known parameter names directly.
        idx = find(strcmpi(args{k}, knownNames), 1);
        if ~isempty(idx),
            param.(knownNames{idx}) = args{k+1};
        end
    end
    k = k + 2;
end

