 % Copyright (C) 2012 	Paul Bovbel, paul@bovbel.com
 % 						Richard Abrich, abrichr@gmail.com
 %
 % This file is part of our empirical study of boosting algorithms (http://code.google.com/p/boosting-study/)
 % 
 % This is free software; you can redistribute it and/or modify
 % it under the terms of the GNU General Public License as published by
 % the Free Software Foundation; either version 3 of the License, or
 % (at your option) any later version.
 % 
 % This source code is distributed in the hope that it will be useful,
 % but WITHOUT ANY WARRANTY; without even the implied warranty of
 % MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 % GNU General Public License for more details.
 % 
 % You should have received a copy of the GNU General Public License
 % along with this source code. If not, see http://www.gnu.org/licenses/

function [ ] = boost( test, dataset, type_classifier, mix_weak_learners, type_boost, weak_learners, cap_min_margin, epochs, perceptron_batch, tree_size, split_cont_var, block_feature_reuse )
%BOOST Train and evaluate a boosted ensemble of weak learners on a dataset.
%
%   boost(test, dataset, type_classifier, mix_weak_learners, type_boost, ...
%         weak_learners, cap_min_margin, epochs, perceptron_batch, ...
%         tree_size, split_cont_var, block_feature_reuse)
%
%   Inputs:
%     test               - experiment/run identifier, forwarded to plot_error
%     dataset            - one of 'credit', 'adult', 'toy', 'dna'; selects the
%                          .mat file loaded from ../res (must define
%                          train_data and test_data with the label in col 1)
%     type_classifier    - 'perceptron' or 'tree' (decision stump)
%     mix_weak_learners  - true to alternate perceptron/tree per round
%     type_boost         - 'adaboost', 'arcgv', 'arcgvmax', or 'smooth'
%     weak_learners      - number of boosting rounds
%     cap_min_margin     - floor applied to the minimum margin (arc-gv variants)
%     epochs             - training epochs for the perceptron
%     perceptron_batch   - true for batch (offline) perceptron training,
%                          false for online training on a resampled set
%     tree_size          - stump branch only runs when this is 0
%     split_cont_var     - forwarded to splitcont_zero preprocessing
%     block_feature_reuse- true to prevent a stump reusing the feature chosen
%                          in the previous round (avoids AdaBoost "getting stuck")
%
%   Side effects: displays progress, draws figures 1 (train margin CDF) and
%   2 (test margin CDF), and calls plot_error with per-round error rates.
%#ok<*SAGROW>
close all;

% size of the history of blocked features (currently only the last feature)
blocked_feature_history = 1;
label = make_label(dataset, weak_learners, mix_weak_learners, type_classifier, perceptron_batch, block_feature_reuse, split_cont_var, type_boost);

%---------LOAD DATA
switch dataset

    case{'credit'}
        load ../res/credit.mat;
    case{'adult'}
        load ../res/adult.mat;
    case{'toy'}
        load ../res/toy.mat;
    case{'dna'}
        load ../res/dna.mat;        
end

disp(label);

%fix classes: remap label 0 -> -1 so labels are in {-1, +1}
%(plain logical indexing; avoids the Mapping Toolbox dependency of changem)
train_data(train_data(:,1) == 0, 1) = -1;
training = true;
testing = false;

%classifier-specific preprocessing
if mix_weak_learners == true 
    %mixture, preprocess both representations
    train_data_nocats = expand_cats(train_data);  
    F = size(train_data_nocats,2)-1;
    train_data_nocont = splitcont_zero(train_data, split_cont_var);
elseif strcmp(type_classifier,'perceptron')
    %perceptron needs categorical features expanded to indicators
    train_data_nocats = expand_cats(train_data); 
    F = size(train_data_nocats,2)-1;
elseif strcmp(type_classifier,'tree')
    %stump needs continuous features split at zero
    train_data_nocont = splitcont_zero(train_data, split_cont_var);
end


%----------TRAIN

%get number of datapoints
N = size(train_data,1);
%initialize data-probability distribution (uniform)
D = ones(N,1)/N;

for i=1:weak_learners
    
    %when mixing, alternate classifier type each round
    if mix_weak_learners == true
       if rem(i,2)
            type_classifier = 'perceptron';
       else
            type_classifier = 'tree';
       end
    end
    
    disp(['training ', int2str(i)]);
    %classifier-specific training
    if strcmp(type_classifier,'perceptron')
        
        %perceptron mode: online, iterative training
        if perceptron_batch == false
            %random-weighted training set, small random initial weights, train, then test
            train_sample = randsample(1:N,N*epochs,true,D);
            W = 0.1 * (rand(1,F+1) - ones(1,F+1) * .5);
            parameters = perceptron_online(train_data_nocats(train_sample,:), W, training);
            output = perceptron_offline(train_data_nocats, parameters, epochs, D, testing);
        
        %perceptron mode: offline, batch training
        elseif perceptron_batch == true
            
            W = 0.1 * (rand(1,F+1) - ones(1,F+1) * .5);
            parameters = perceptron_offline(train_data_nocats, W, epochs, D, training);
            output = perceptron_offline(train_data_nocats, parameters, epochs, D, testing);
        end
        
    elseif strcmp(type_classifier,'tree') && tree_size == 0
        
        %record which feature the previous stump used, to prevent adaboost
        %"getting stuck" reselecting it (with blocked_feature_history == 1,
        %rem(i,1)+1 is always index 1, i.e. only the last feature is blocked)
        if i > 1+double(mix_weak_learners) && block_feature_reuse == true
            prev_feature(rem(i,blocked_feature_history)+1) = weak(i-1-double(mix_weak_learners)).parameters(1);
        else
            prev_feature = 0;
        end 
        
        %train stump
        parameters = stump(train_data_nocont, D, prev_feature, training);
        %get output of stump on training data
        output = stump(train_data_nocont, parameters, prev_feature, testing);
 
    end
    
    %------------BOOSTING
    %flag matches: +1 where the weak learner agrees with the true label,
    %-1 where it disagrees. (A former "i >= 1" guard here was always true
    %inside this loop, so its dead else-branch has been removed.)
    match = output .* train_data(:,1);
    
    %AdaBoost  
    if strcmp(type_boost,'adaboost')
        %build weak learner structure from trained parameters, update data
        %distribution D
        [weak(i), D] = adaboost(parameters, match, D, i, 0);

        
    %arc-gv: target margin is the minimum margin so far, floored at cap
    elseif strcmp(type_boost,'arcgv')
        if i == 1
            min_margin(i) = cap_min_margin;
        else
            min_margin(i) = min(margin_train(:,i-1));
            if min_margin(i) < cap_min_margin
                min_margin(i) = cap_min_margin;
            end
        end
        %build weak learner structure from trained parameters, update data
        %distribution D
        [weak(i), D] = adaboost(parameters, match, D, i, min_margin(i));

    %arc-gv max: target margin is monotone non-decreasing across rounds
    elseif strcmp(type_boost,'arcgvmax')
        if i == 1
            %cap lowest margin to keep the first round well-behaved
            min_margin(i) = cap_min_margin;
        else
            min_margin(i) = max([min_margin(i-1) min(margin_train(:,i-1))]);          
        end
        %build weak learner structure from trained parameters, update data
        %distribution D
        [weak(i), D] = adaboost(parameters, match, D, i, min_margin(i));        
        
    %Smooth Margin - not working - smooth_margin fxn_G never goes above 0
    elseif strcmp(type_boost,'smooth')
        if i > 1
            smooth_margin(i) = -log(sum(exp(-margin_train_num(:,i-1)))) / margin_train_den(i-1);
            if smooth_margin(i) < 0
                smooth_margin(i) = 0;
            end
        else
            smooth_margin(i) = 0;
        end
        [weak(i), D] = adaboost(parameters, match, D, i, smooth_margin(i));
    end
    
    %calculate votes and running ensemble hypothesis for training cases
    vote_train(:,i) = weak(i).weight .* output;
    hypothesis_train(:,i) = sign(sum(vote_train,2));  
    
    %keep margins for all weak learners (normalized by total weight)
    margin_train_num(:,i) = train_data(:,1) .* sum(vote_train,2);
    margin_train_den(i) = sum([weak.weight]);
    margin_train(:,i) = margin_train_num(:,i) / margin_train_den(i);
    
    %live plot of the training-margin CDF
    figure(1);
    cdfplot(margin_train(:,end));
    axis([-1 1 0 1]);
    ylabel('Distribution');
    xlabel('Margin');
    title('');
    drawnow
    
end

%get per-round training error rate (percent misclassified by the ensemble)
for i=1:weak_learners
    weak(i).train_error = (1-sum(train_data(:,1) == hypothesis_train(:,i))/N) * 100;
end

%% ------------------TEST

%get number of test datapoints
N = size(test_data,1);

%fix classes: remap label 0 -> -1 (same convention as training)
test_data(test_data(:,1) == 0, 1) = -1;

%classifier-specific preprocessing (mirrors the training preprocessing)
if mix_weak_learners == true 
    %mixture, preprocess both representations
    test_data_nocats = expand_cats(test_data);  
    test_data_nocont = splitcont_zero(test_data, split_cont_var);
elseif strcmp(type_classifier,'perceptron')
    %preprocess data for perceptron
    test_data_nocats = expand_cats(test_data); 
elseif strcmp(type_classifier,'tree')
    %preprocess data for tree
    test_data_nocont = splitcont_zero(test_data, split_cont_var);
end

%test on each weak learner
for i=1:weak_learners
    
    if mix_weak_learners == true
       if rem(i,2)
            type_classifier = 'perceptron';
       else
            type_classifier = 'tree';
       end
    end
    
    disp(['testing ', int2str(i)]);
    %classifier-specific evaluation
    if strcmp(type_classifier,'perceptron')
        
        %get output of perceptron on testing data
        [output] = perceptron_offline(test_data_nocats, weak(i).parameters, epochs, D, testing);
    
    elseif strcmp(type_classifier,'tree') && tree_size == 0
        
        %get output of stump on testing data
        %NOTE(review): prev_feature carries its last training-loop value here;
        %presumably stump ignores it in testing mode — confirm in stump.m
        [output] = stump(test_data_nocont, weak(i).parameters, prev_feature, testing);
    
    end
    
    %calculate votes and hypothesis for testing data
    vote_test(:,i) = weak(i).weight .* output;
    hypothesis_test(:,i) = sign(sum(vote_test,2));   
    
    %calculate margin for testing data (normalized by total weight)
    margin_test_num(:,i) = test_data(:,1) .* sum(vote_test,2);
    margin_test_den(i) = sum([weak.weight]);
    margin_test(:,i) = margin_test_num(:,i) / margin_test_den(i);
        
    %live plot of the test-margin CDF
    figure(2);
    cdfplot(margin_test(:,end));
    axis([-1 1 0 1]);
    ylabel('Distribution');
    xlabel('Margin');
    title('Margin CDF');
    drawnow
end

%get per-round test error rate (percent misclassified by the ensemble)
for i=1:weak_learners
    weak(i).test_error = (1-sum(test_data(:,1) == hypothesis_test(:,i))/N) * 100;
        
end

%assemble [round-size, train-error, test-error] rows and plot
error_data = [ [weak.N]' [weak.train_error]' [weak.test_error]' ];
plot_error(test, margin_train, weak_learners, error_data, label);

end