function [DT_LS, DT_ML, cov_DT_ML, varargout] = T1IRAbsComputePar(data, echoes, useWLS, noiselevel,opt,s,f, TR,fitrange)

% T1IRABSCOMPUTEPAR: computes parameters with LS and ML estimation for
% Inversion Recovery: SI = A - B*exp(-R1*TI)
%
% Inputs:
% data      : A matrix in which the data of each voxel is stored in a single
%             column. Any datatype convertable to double is allowed.
% echoes    : Column vector containing the inversion times (TI) that were
%             applied in the acquisitions.
% useWLS    : default = false; A boolean that when true requests a weighted
%             Least Squares estimate; not implemented yet, normal LS is
%             always used (a warning is issued).
% noiselevel: A user provided scalar noise level, used by the ML estimator.
%             Note that noise level differences of ~30% to low up to ~70%
%             to high are usually no problem (since in this range it hardly
%             affects the ML estimates).
% opt       : optimset options structure, forwarded to FMINUNC.
% s, f      : fit options (s) and fittype (f) for the Curve Fitting Toolbox
%             FIT call used for the LS initialization; the fit result must
%             expose coefficients a, b, c.
% TR        : repetition time, forwarded to T1IRAbsParameters.
% fitrange  : index vector selecting the samples used in the LS fit (used
%             only when all inversion times are unique).
%
% Outputs:
% DT_LS and DT_ML : respectively the least squares estimate and the maximum
%       likelihood estimate of the model parameters (3 x 1: a, b, c).
% cov_DT_ML(:,:,k): The (estimated) covariance matrix of the estimated
%       parameters (inverse Hessian of the -log-likelihood).
% The following outputs are usually not interesting:
% optimpar(:,k)   : The optimal parameters, as returned by the optimization
%       routine (scaled domain).
% mLLval(k)       : The - log likelihood value, evaluated in the minimum
%       position, as given in optimpar(:,k).
% ef(k)           : The exit flag of the fminunc optimization (0 when the
%       optimization raised an error).
% outp(k)         : The optimization output (with # of funccalls etc)
% gr(:,k)         : The gradient at the optimal point (optimpar(:,k))
% he(:,k)         : reshaped hessian at the optimal point (optimpar(:,k))
%
% It is important (for the most accurate results) that each element in the
% data has the same power of rice distributed noise. This specifically
% means that you should not average frames!
%
% Created by Henk Smit, EMC, 01-2011 based on the work by Dirk Poot, University of Antwerp, 13-8-2007.


ntraces = size(data,2);
nPar = 3; % model parameters per trace: a, b, c

DT_LS = zeros(nPar,1,'single'); %HENK

% Magnitude MR data must be positive and finite for the ML fit; patch
% invalid samples to 1 and warn.
wrongval = ~isfinite(data);
if any(wrongval(:))
    if all(data(wrongval)==0)
        warning('ComputeDT:zeroDataValue','data cannot be zero for ML fit, adjusted to 1');
    else
        warning('ComputeDT:InvalidDataValue',['negative, infinite or NAN data found in ' num2str(nnz(wrongval)) ' elements, invalid for magnitude MR; adjusted to 1']);
    end;
    data(wrongval) = 1;
end;
clear wrongval

if useWLS
    warning('ComputeDT:WeightedLSrequested','Weighted LS not possible (yet). Normal LS will be used');
else
    % Normal LS (Curve Fitting Toolbox) used to initialize the ML estimation.
    nunique = size(unique(echoes),1);
    if nunique == size(echoes,1)
        % All inversion times are unique: fit on the user supplied range.
        LSrange = fitrange;
    else
        % Duplicate inversion times present: fit on one copy of each TI.
        LSrange = 1:size(echoes,1)/nunique:size(echoes,1);
    end;
    [c2] = fit(echoes(LSrange),data(LSrange),f,s);
    DT_LS(1,1)=c2.a;
    DT_LS(2,1)=c2.b;
    DT_LS(3,1)=c2.c;
end;

% Scale the parameters to comparable magnitudes for the optimizer; the
% scaling is undone on both outputs just before returning.
scales=[1000;1;0.001]; %henk scales
DT_LS=DT_LS./scales;

if nargout==1
    return;
end;

DT_ML = zeros(size(DT_LS));
computeCovPar = nargout>=3;

if computeCovPar
    cov_DT_ML = zeros(nPar,nPar,ntraces);
end;

% Preallocate the optional per-trace outputs:
% {1}=optimpar, {2}=mLLval, {3}=ef, {4}=outp, {5}=gr, {6}=he.
nExtra = max(0,nargout-3);
if nExtra >= 7
    error('too many outputs');
end;
directStore = cell(nExtra,1);
if nExtra>=1
    directStore{1} = zeros(nPar,ntraces);
end;
if nExtra>=2
    directStore{2} = zeros(1,ntraces);
end;
if nExtra>=3
    directStore{3} = zeros(1,ntraces);
end;
if nExtra>=4
    directStore{4} = struct('iterations',cell(1,ntraces),'funcCount',[],'cgiterations',[],'firstorderopt',[],'algorithm',[],'message',[]);
end;
if nExtra>=5
    % NOTE(review): sizes were 7 and 7*7 before (residue of a 7-parameter
    % DTI fitter); this model has nPar=3 parameters per trace.
    directStore{5} = zeros(nPar,ntraces);
end;
if nExtra>=6
    directStore{6} = zeros(nPar*nPar,ntraces);
end;

% tempOut receives the six fminunc outputs: x, fval, exitflag, output,
% grad, hessian.
tempOut = cell(6,1);
k=1;
nperStep = 1; % number of traces fitted per fminunc call

while k <= ntraces

    krng = k:min(k+nperStep-1,ntraces);
    StVect = double(DT_LS(:,krng)); %initialize with LS parametervector

    ddta = double(data(:,krng));
    optfun = @(p) T1IRAbsParameters(p, ddta, echoes , noiselevel, scales, TR);%HENK

    try
        [tempOut{:}] = fminunc(optfun, StVect, opt);
    catch ME
        % Optimization failed: return the start vector (better than
        % nothing) and NaN diagnostics so the stores below stay valid.
        tempOut{1} = StVect;
        tempOut{2} = nan;
        tempOut{3} = 0;
        tempOut{4} = struct('iterations',nan,'funcCount',nan,'cgiterations',nan,'firstorderopt',nan,'algorithm',[], ...
            'message',['Error in optimization of traces [' num2str(krng) '], error message: ' ME.message]);
        tempOut{5} = nan(size(StVect));
        tempOut{6} = nan(numel(StVect));
    end;

    DT_ML(:,krng) = tempOut{1};
    if computeCovPar
        for kk=1:numel(krng)
            % Per-trace blocks on the Hessian diagonal are nPar apart
            % (the previous stride of 2 was wrong for a 3-parameter model).
            blk = (kk-1)*nPar+(1:nPar);
            cov_DT_ML(:,:,krng(kk)) = inv( tempOut{6}(blk,blk) ) ;
        end;
    end;

    % Store the optional per-trace outputs. (Previously these were never
    % filled in, so varargout only returned the preallocated zeros.)
    if nExtra>=1
        directStore{1}(:,krng) = tempOut{1};
    end;
    if nExtra>=2
        directStore{2}(krng) = tempOut{2};
    end;
    if nExtra>=3
        directStore{3}(krng) = tempOut{3};
    end;
    if nExtra>=4
        % Copy field-by-field: fminunc's output struct can have a field
        % set that differs from the preallocated template, and direct
        % struct-array assignment between dissimilar structs errors.
        fn = fieldnames(directStore{4});
        for fi=1:numel(fn)
            if isfield(tempOut{4},fn{fi})
                [directStore{4}(krng).(fn{fi})] = deal(tempOut{4}.(fn{fi}));
            end;
        end;
    end;
    if nExtra>=5
        directStore{5}(:,krng) = tempOut{5};
    end;
    if nExtra>=6
        directStore{6}(:,krng) = reshape(tempOut{6}, [], numel(krng));
    end;

    % Advance past the whole processed range (robust if nperStep > 1).
    k = krng(end)+1;
end;
% Undo the parameter scaling on both estimates.
DT_LS=DT_LS.*scales;
DT_ML=DT_ML.*scales;
varargout = directStore;