function [X,Y] = update(X,Y,s,a,r,sp,h,al,ga,debug)
%
% function [X,Y] = update(X,Y,s,a,r,sp,h,al,ga,debug)
%
% hedger updater function, given reward
%
% inputs:  X,Y   = current data and values
%          s,a   = state-action undertaken
%          r,sp  = reward and state obtained
%          h     = LWR bandwidth parameter (default 1.0)
%          al,ga = learning rate (alpha, default 0.5) and
%                  discount rate (gamma, default 0.5)
%          debug = if true, drop into keyboard before the kernel
%                  update step (default false)
%
% outputs: X,Y  = updated data and values
%
% Diego Pontoriero, 2007-4-15

%	default h value
if nargin < 7
	h = 1.0;
end

%	default alpha value
if nargin < 8
	al = 0.5;
end

%	default gamma value
if nargin < 9
	ga = 0.5;
end

%	default debug flag
if nargin < 10
	debug = false;
end

% predict current Q value; also returns the indices of the points used
% and their kernel weights (kappas) for the smoothing update at the end
[Q,indices,kappas] = predict(X,Y,s,a,h);

% --- predict next Q with iterative process ---
% Approximate max over actions of Q(sp,.) by repeatedly fitting an
% axis-aligned quadratic surface (no cross terms) to the q-values of the
% n nearest stored state-action points and jumping to its stationary
% point, until successive predictions converge.
n = 5;          % number of nearest-neighbour samples used to fit the surface
epsilon = 1e-9; % convergence criteria on successive Q predictions
Q_next = 0;     % if there are too few data points, the next-Q term stays 0
st = sp;
stp = st';
max_iter = 10;

iter = 0;
if size(X,1) > n
	samples_q = zeros(n,1);	% preallocate sampled q-values
	while iter < max_iter
		% rank stored points by distance to the current state estimate
		% (squared distance is monotone in distance, so no sqrt needed)
		% distances = sqrt(sum((repmat(st',size(X,1),1) - X(:,1:size(st,1))).^2,2));
		distances = sum((stp(ones(size(X,1),1),:) - X(:,1:size(st,1))).^2,2);
		[sorted,ix] = sort(distances); %#ok<ASGLU>
		ix = ix(1:n);	% keep first n indices
		samples_x = X(ix,:);
		actions = samples_x(:,size(st,1)+1:end);

		% predict q-values for these candidate actions
		for i=1:n
			samples_q(i,:) = predict(X,Y,st,actions(i,:)',h);
		end

		% fit a quadratic surface to the sampled points (no cross terms for now)
		% NOTE(review): stp is not refreshed after st is reassigned below, so
		% later iterations build the design matrix from the original next
		% state while predicting at the moved one -- confirm this is intended
		A = [stp(ones(size(actions,1),1),:) actions];
		% A = [repmat(st',size(actions,1),1) actions];
		A = [A A.^2];
		x = A\samples_q;

		% stationary point of the fitted quadratic: -b/(2c) per dimension
		rhs = x(1:size(x,1)/2);
		lhs = 2*x(size(x,1)/2+1:end);
		lhs(~lhs) = 1;	% guard against division by zero in flat dimensions
		maximum = -rhs./lhs;

		% sample new point at stationary point of fitted function
		st = maximum(1:size(st,1));
		act = maximum(size(st,1)+1:end);
		Q_pred = predict(X,Y,st,act,h);

		% stopping condition: prediction has converged
		if abs(Q_pred - Q_next) < epsilon
			break;
		end
		Q_next = Q_pred;
		iter = iter + 1;
	end
end
% --- end Q_next predictor ---

% standard one-step Q-learning backup toward r + gamma*Q_next
Q_new = Q + al*(r + ga*Q_next - Q);

% learn Q(s,a) = Q_new: append the new state-action row and its value
X(size(X,1)+1,:) = [s; a]';
Y(size(Y,1)+1,:) = Q_new;

if debug
	keyboard
end
% for each point in K, Q(s_i,a_i) = Q(s_i,a_i) + kappa_i(Q_new - Q(s_i,a_i))
if size(indices,1) > 0 && Q ~= 0
	Y(indices) = Y(indices) + kappas.*(repmat(Q_new,size(Y(indices),1),1) - Y(indices));
end