% KM_DEMO_KRLS_MG30 One-step prediction using kernel adaptive filtering
% algorithms on the MG30 Mackey-Glass data set.
%
% This program contains code to generate a Mackey-Glass time series using
% the 4th order Runge-Kutta method, based on a Matlab implementation
% provided by Eric Wan, available at http://www.cse.ogi.edu/~ericwan/
%
% Author: Steven Van Vaerenbergh (steven *at* gtas.dicom.unican.es), 2010.
%
% This file is part of the Kernel Methods Toolbox for MATLAB.
% https://github.com/steven2358/kmbox

close all
clear

%% PARAMETERS

N = 5000;		% number of data points sampled from Mackey-Glass data set

pars.kernel.type = 'gauss';	% kernel type
pars.kernel.par = .8;			% kernel parameter (width in case of Gaussian kernel)
pars.M = 100;				% maximum dictionary size

% % parameters for ALD-KRLS
% pars.algo = 'ald-krls';
% pars.thresh = 0.001;

% % parameters for SW-KRLS
% pars.algo = 'sw-krls';
% pars.c = 1E-4;

% parameters for KRLS-T (active algorithm; to switch, comment this block
% out and uncomment exactly one of the alternatives)
pars.algo = 'krlst';
pars.c = 1E-4;
pars.lambda = 1; % no forgetting

% % parameters for QKLMS
% pars.algo = 'qklms';
% pars.mu = 0.1;
% pars.epsu = 0.3;

% % parameters for NORMA (KLMS)
% pars.algo = 'norma';
% pars.mu = 0.35;		% learning rate
% pars.c = 1E-4;	% regularization parameter

embed = 7;			% number of samples in time-embedding
step = 1;			% prediction step
Ntrain = 3000;		% number of training samples
Ntest = 1000;		% number of test samples

% The Mackey-Glass time series is generated by the following nonlinear
% delayed differential equation:
% $$\frac{dx(t)}{dt} = -ax(t) + \frac{bx(t-\tau)}{1+x(t-\tau)^{10}}$$
a = 0.1;	% linear decay rate
b = 0.2;	% gain of the delayed nonlinear term
x0 = 0.9;	% initial condition
dt = 0.01;	% integration step
tt = 6;		% sampling step
tau = 30;	% Mackey-Glass delay parameter

%% PROGRAM
tic

fprintf('Generating Mackey-Glass time series...\n');
N_full = N*tt/dt;			% total number of integration steps
tau_full = tau/dt;			% delay expressed in integration steps
xmg = x0*ones(N_full+1,1);	% constant history as initial condition
% integrate the delay differential equation with a 4th-order Runge-Kutta
% scheme; the delayed term is held fixed within each integration step
for n = tau_full+1:N_full
	xn = xmg(n);				% current state x(t)
	xdel = xmg(n-tau_full);		% delayed state x(t-tau)
	fdel = b*xdel/(1+xdel^10);	% nonlinear delayed forcing term
	k1 = dt*(-a*xn + fdel);
	k2 = dt*(-a*(xn+k1/2) + fdel);
	k3 = dt*(-a*(xn+k2/2) + fdel);
	k4 = dt*(-a*(xn+k3) + fdel);
	xmg(n+1) = xn + k1/6 + k2/3 + k3/3 + k4/6;
end
% downsample: keep one sample every tt/dt integration steps
mg = xmg(1:tt/dt:N_full);
x = mg;

% build time-delay embedded inputs and prediction targets for train/test
offset = embed+step;	% index of the first prediction target in x
x_train = zeros(Ntrain,embed);
x_test = zeros(Ntest,embed);
for k = 1:embed			% fill the embedding matrices column by column
	x_train(:,k) = x(k:k+Ntrain-1);
	x_test(:,k) = x(Ntrain+k:Ntrain+k+Ntest-1);
end
y_train = x(offset:offset+Ntrain-1);
y_test = x(offset+Ntrain:offset+Ntrain+Ntest-1);

% apply the selected kernel adaptive filtering algorithm online
fprintf('Applying %s for %d-step prediction...\n',pars.algo,step);

% resolve the algorithm to a function handle once, outside the loop
% (the original switch was re-evaluated on every training iteration)
switch pars.algo
	case 'ald-krls'
		algo_handle = @km_aldkrls;
	case 'sw-krls'
		algo_handle = @km_swkrls;
	case 'krlst'
		algo_handle = @km_krlst;
	case 'qklms'
		algo_handle = @km_qklms;
	case 'norma'
		algo_handle = @km_norma;
	otherwise
		error('wrong algorithm')
end

vars = [];
MSE = zeros(Ntrain,1);
for i = 1:Ntrain
	if ~mod(i,Ntrain/10), fprintf('.'); end	% progress indicator
	vars.t = i;	% current time index, read internally by the algorithms
	
	% train on one sample, then evaluate on the whole test signal
	vars = algo_handle(vars,pars,x_train(i,:),y_train(i));	% train
	y_est = algo_handle(vars,pars,x_test);					% evaluate
	
	% calculate test error
	MSE(i) = mean((y_test-y_est).^2);
end
fprintf('\n');

toc
%% OUTPUT

% show a segment of the generated time series
figure;plot(mg(1:500))
title(sprintf('Mackey-Glass time series for tau=%d', tau));

% report steady-state performance in scientific notation
% (fixed: the original used %.2d, an integer conversion that only printed
% correctly through MATLAB's implicit fallback for non-integer values)
fprintf('Mean MSE over last 500 steps = %.2e\n',mean(MSE(Ntrain-499:Ntrain)))

% learning curve
figure;semilogy(MSE)
title('MSE')

% compare the true test signal with the final prediction
figure; hold on
plot(y_test,'b')
plot(y_est,'g')
legend({'Test output','Estimated output'})
title(sprintf('%s mem=%d',pars.algo,length(vars.basis)))
