function [boost] = adaboost_train(X, Y, T, depth_limit, n_features)
% ADABOOST_TRAIN Multiclass AdaBoost (SAMME) over a pool of DT stumps.
%
% Usage:
%
%   boost = adaboost_train(X, Y, T, DEPTH_LIMIT)
%   boost = adaboost_train(X, Y, T, DEPTH_LIMIT, N_FEATURES)
%
% Inputs:
%   X           - N x M feature matrix.
%   Y           - N x 1 vector of class labels; labels are assumed to be
%                 1..K since they index columns of the score matrices.
%   T           - number of boosting rounds.
%   DEPTH_LIMIT - depth limit of each weak-learner decision tree.
%   N_FEATURES  - (optional) number of features given to make_dt_stumps;
%                 defaults to 28 for backward compatibility.
%
% Returns a struct with the following fields:
%
%   boost.err        - 1 x T weighted error of the learner chosen at round t
%   boost.dt         - 1 x T cell array: the decision tree chosen at round t
%   boost.alpha      - 1 x T combining weights (SAMME:
%                      alpha_t = log((1-eps_t)/eps_t) + log(K-1))
%   boost.rank_error - 1 x T rank error of the combined classifier after
%                      each round

if nargin < 5
    n_features = 28;  % historical default of the original implementation
end

n = size(Y, 1);

% Number of classes. Computed before allocating score matrices so the
% class dimension is not hard-coded.
K = numel(unique(Y));

% Build the fixed pool of weak learners once; each boosting round then
% selects the pool member with the smallest weighted error.
dt_stumps = make_dt_stumps(X, Y, depth_limit, n_features);
n_stumps = numel(dt_stumps);

% Precompute every stump's class scores and hard predictions on X so each
% boosting round only needs to reweight cached per-example errors.
stump_prediction = zeros(n, n_stumps);
scores = zeros(n, K, n_stumps);

for i = 1:n_stumps
    fprintf('Calculating stump %d predictions... \n', i);
    scores(:,:,i) = dt_test_multi(dt_stumps{i}, X);
    [~, stump_prediction(:,i)] = max(scores(:,:,i), [], 2);
end

% stump_errors(j,i) is true when stump i misclassifies example j.
stump_errors = bsxfun(@ne, stump_prediction, Y);

% Initialize a uniform distribution over the training examples.
D = ones(n, 1) / n;

% Preallocate per-round outputs.
rank_error = zeros(1, T);
dt = cell(1, T);
err = zeros(1, T);
alpha = zeros(1, T);

% Running (unnormalized) combined classifier: sum over t of alpha_t * h_t.
curr_classifier = zeros(n, K);

for t = 1:T

    fprintf('Adaboost currently on round %d ... \n', t);

    % Weighted error of every stump under the current distribution D.
    % D is renormalized each round, so no division by sum(D) is needed.
    err_vect = sum(bsxfun(@times, stump_errors, D));

    % Greedily pick the stump with the smallest weighted error.
    [err(t), ind] = min(err_vect);
    dt{t} = dt_stumps{ind};

    % SAMME weight: alpha_t = log((1 - eps_t)/eps_t) + log(K - 1).
    % Clamp eps_t away from {0, 1} so a perfect (or hopeless) stump does
    % not produce an infinite weight and NaN example weights.
    e = min(max(err(t), eps), 1 - eps);
    alpha(t) = log(1 - e) - log(e) + log(K - 1);

    % Reweight: D_{t+1}(j) prop. to D_t(j) * exp(alpha_t * [y_j ~= h_t(x_j)]).
    D = D .* exp(alpha(t) * (stump_prediction(:,ind) ~= Y));

    % Normalize D back to a distribution.
    D = D / sum(D);

    % Accumulate the chosen stump's class scores into the ensemble.
    curr_classifier = curr_classifier + alpha(t) * scores(:,:,ind);

    % Rank error of the combined classifier after t rounds.
    rank_error(t) = rank_err(curr_classifier, Y);

end

% Store results of the boosting algorithm.
boost.rank_error = rank_error;
boost.err = err;
boost.dt = dt;
boost.alpha = alpha;
