function [a,flag] = recencyE(param, Q, a, t, iState, aFCat,n)
% recencyE  Recency-based exploratory action selection.
%
% Perturbs the Q-values of the current state with small, exponentially
% distributed noise and greedily picks the action with the highest
% perturbed value (see: "Overcoming Non-stationarity in Uncommunicative
% Learning", Zhu & Ballard, 2002).
%
% Inputs:
%   param  - struct; param.actionV{n} maps Q-column indices to action values
%   Q      - Q-value matrix, indexed as Q(state, action)
%   a      - action history vector; entry a(t) is written by this function
%   t      - current time step (index into a)
%   iState - row index of the current state in Q
%   aFCat  - number of feasible action columns of Q to consider
%   n      - index into param.actionV selecting the action set
%
% Outputs:
%   a    - action history with a(t) set to the chosen action
%   flag - always 2 (marks that this exploration rule made the choice)
%
% Tobias Siegfried, 24.04.2007

% Q-values of the feasible actions in the current state, as a column vector.
tQ = Q(iState,1:aFCat);
tQ = tQ(:);

% Alternative (disabled): Gibbs / softmax sampling over tQ.
% eQ = exp(tQ/param.smoothR);
% p = eQ ./ sum(eQ);
% pCS = cumsum(p);
% rN = rand;
% a(t) = param.actionV{n}(sum(rN>pCS)+1);

% Subtract a small long-tailed (exponential) perturbation from each
% Q-value. NOTE: requires the third-party RANDRAW generator (MATLAB
% File Exchange).
subt = .001 * randraw('exp', 0.5 ,[1 length(tQ)])';
tQ = tQ - subt;
%tQ = tQ - randraw('gamma', [0 abs(mean(tQ))+.0001 10000], [1 length(tQ)]);

% Greedy choice on the perturbed values. Using MAX's index output picks
% the FIRST maximum, so ties can never produce a vector-valued index
% (the previous find(tQ==max(tQ)) errored on ties).
[maxVal, iMax] = max(tQ); %#ok<ASGLU>
a(t) = param.actionV{n}(iMax);

% Signal that the recency-based rule selected the action.
flag = 2;