 % Copyright (C) 2012 	Paul Bovbel, paul@bovbel.com
 % 						Richard Abrich, abrichr@gmail.com
 %
 % This file is part of our empirical study of boosting algorithms (http://code.google.com/p/boosting-study/)
 % 
 % This is free software; you can redistribute it and/or modify
 % it under the terms of the GNU General Public License as published by
 % the Free Software Foundation; either version 3 of the License, or
 % (at your option) any later version.
 % 
 % This source code is distributed in the hope that it will be useful,
 % but WITHOUT ANY WARRANTY; without even the implied warranty of
 % MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 % GNU General Public License for more details.
 % 
 % You should have received a copy of the GNU General Public License
 % along with this source code. If not, see http://www.gnu.org/licenses/

function [ output ] = perceptron_offline( data, parameters, epochs, D, train )
%PERCEPTRON_OFFLINE train or test a single-layer perceptron
%   data       - N-by-(F+1) matrix; column 1 holds the target labels,
%                columns 2..end hold the F feature values
%   parameters - 1-by-(F+1) weight vector (bias weight first); initial
%                weights when training, learned weights when testing
%   epochs     - number of full passes over the data (training mode only)
%   D          - N-by-1 per-example weight distribution (training mode only;
%                presumably the boosting distribution - see project docs)
%   train      - logical: true to train, false to test
%   Output:
%       - learned weight vector (training mode)
%       - clipped linear activation per example (testing mode)

%learning rate for the gradient update
eta = 0.001;
%regularization strength (penalizes large weights)
alpha = 100;

N = size(data,1);

%prepend a bias column of ones; drop the label column
X = [ones(N,1) data(:,2:end)];

%in training mode, update the weights and return them
if train

    %targets come from the first column of data
    T = data(:,1);

    for i = 1:epochs
        %linear activation, clipped elementwise to [-1, 1]
        A = cap_unit(X * parameters.');

        %gradient of the squared error, each example weighted by its
        %distribution value D (1-by-(F+1) row vector)
        dW = ((T - A) .* D).' * X;

        %update with a regularization term that always pushes each
        %weight toward zero (sign(.) opposes the weight's direction)
        parameters = parameters + eta * (dW - sign(parameters).*alpha.*parameters.^2);
    end
    output = parameters;

%in testing mode, return the classifier activations
else
    output = cap_unit(X * parameters.');
end

end

function A = cap_unit(A)
%CAP_UNIT clip values elementwise to the range [-1, 1]
A = min(1, max(-1, A));
end

