%% Matlab KRLS Test Code 
%
% Set up a few parameters
%

% Algorithm configuration: kernel choice plus its parameter, and the
% approximate-linear-dependence (ALD) sparsification threshold.
kparam = 0.1;            % Gaussian kernel variance
ald_thresh = 0.1;        % ALD (approximate linear dependence) threshold
kernel_func = @rbf4nn;   % handle to the Gaussian (RBF) kernel function

% The kernel function should accept three parameters:
%  p1: a d-by-m matrix, containing m d-dimensional datapoints
%  p2: a d-by-n matrix, containing n d-dimensional datapoints
%  p3: a kernel-specific parameter (passed through unchanged)
%
%  the function should return an m-by-n matrix D, where
%  D_{ij} is the kernel of p1_i with p2_j.
%
%  The parameter can be whatever you want; it's the "kparam"
%  variable above.

% Size of the synthetic training set.
num_dims = 5;        % dimensionality of each input point
num_samples = 100;   % number of training examples

%
% ------------------------------------------
%% Generate some random data
%

% Draw uniformly-random inputs (one d-dimensional point per column)
% and one scalar target per input.
data = rand(num_dims, num_samples);
targets = rand(1, num_samples);

%
% ------------------------------------------
%% Run the KRLS algorithm
%

% Seed the KRLS predictor with the first training example.
kp = krls_init(kernel_func, kparam, ald_thresh, data(:,1), targets(1));

%% present the remaining examples one at a time, timing the whole pass
tic
for n = 2:num_samples
  kp = krls(kp, data(:,n), targets(n));
end
toc
%
% ------------------------------------------
% Done!
% Now the structure kp has several interesting things. The
% most useful are:
%
% kp.dp.Dict is a matrix containing the dictionary points
% kp.Alpha contains the learned weights
%

%
% ------------------------------------------
% To predict a new point, you would do the following:
%
%% get the test point (in this case, it's random)
%% get the test point (in this case, it's random)
test_input = rand( num_dims, 1 );

%% compute the kernel of the input with the dictionary
% kernel_func is a function handle, so invoke it directly; feval is only
% required when the function is named by a string, not for handles.
kernvals = kernel_func( test_input, kp.dp.Dict, kp.dp.kparam );

%% compute the weighted sum of dictionary kernels (the KRLS prediction)
target =  kernvals * kp.Alpha;

%% this is exactly what the krls_query function does!
% (no trailing semicolon on purpose, so the prediction is displayed)
target = krls_query( kp, test_input )

