function out = resLearn2(param,d,q,method,ee,Q)
% out = resLearn2(param,d,q,method,ee,Q)
% Reinforcement-learning loop for reservoir allocation decisions.
%
% Inputs:
%   param  - parameter struct (see paramSetup): tEnd, dt, V0, randS,
%            discS, discQ, maxDimL, learnR, delta, explorePIni,
%            decreaseP, minExpP, yearEcho, nDays, aCoeff, bCoeff, ...
%   d      - demand time series (length >= param.tEnd)
%   q      - inflow time series (length >= param.tEnd)
%   method - 'disc' (tabular Q-learning), 'hedger', 'wires', or 'svm'
%   ee     - exploration scheme for 'disc': 'undirE' or 'recencyE'
%   Q      - (optional) pre-learned Q table; if supplied, exploration is
%            reduced to param.minExpP and Q is NOT re-initialized.
%
% Output:
%   out    - struct with fields param, a (actions), r (rewards), l, V
%            (storage), dimL (state trajectory), h; plus Q ('disc') or
%            exploreH ('hedger').
%
% see also description in paramSetup.
% sample call: tt=resLearn2(param,param.dAg,param.q,'disc','undirE');
% Tobias Siegfried, Diego Pontoriero, 04/24/2007

% Initialize non-specific parameters
rand('seed',param.randS); % legacy seeding (pre-R2011a style); kept for reproducibility with old runs
V = zeros(param.tEnd,1);
a = V; % action
l = a; % losses (updated by watBalRes)
r = a; % reward
dimL = a; % dimensionless state: (inflow + storage/dt) / demand
h = a; % presumably stage/head from storage via resVH — TODO confirm
% Initial values
V(1) = param.V0; 
h(1) = resVH(V(1),param.aCoeff,param.bCoeff,'vh');
% Do specific stuff
if strcmp(method,'disc')
    % Discretize state and action spaces for the tabular Q-learner.
    param.stateV = 0 : param.discS : param.maxDimL;
    %param.stateV = -param.maxDimL : param.discS : param.maxDimL; % in the log case
    param.actionV = 0 : param.discQ : max(d) - param.discQ;
    %Q = sparse(round(param.maxDimL/param.discS)+1,round(max(d)/param.discQ));
    %Q = zeros(round(param.maxDimL/param.discS)+1,round(max(d)/param.discQ));
    %Q1 = 0:param.discS/param.maxDimL:1;
    if nargin < 6
        % No Q supplied: start from scratch with full initial exploration.
        %  Q1 = 0:1/round(max(d)/param.discQ):1-1/round(max(d)/param.discQ);
        %  Q = repmat(Q1,[param.maxDimL+1 1]);
        Q = zeros(round(param.maxDimL/param.discS)+1,round(max(d)/param.discQ));
        param.exploreP = param.explorePIni;
        if strcmp(ee,'recencyE')
            Qp = Q;
        end
    else
        % nargin == 6: a pre-learned Q was supplied, so exploit it with
        % only the minimal amount of exploration.
        % BUGFIX: this branch used to be 'elseif nargin == 5', which is
        % unreachable (nargin < 6 already covers 5), so a 6-argument call
        % left param.exploreP unset.
        param.exploreP = param.minExpP;
    end
elseif strcmp(method,'hedger')
    % hack-ish stuff
    warning off all;
    % hedger algorithm
    addpath('../learning/hedger');
    hedger.X = [];          % experience inputs (state/action memory)
    hedger.Y = [];          % experience targets
    hedger.h = 1.0;         % bandwidth for find_action/update — TODO confirm semantics
    hedger.alpha = 0.1;     % learning rate
    hedger.gamma = 0.03;    % discount-related constant — TODO confirm
    % looks like we need some of these from the 'disc' method.
    % *** will refactor code substantially later
    param.stateV = 0 : param.discS : param.maxDimL;
    %param.stateV = -param.maxDimL : param.discS : param.maxDimL; % in the log case
    param.actionV = 0 : param.discQ : max(d);
    % preallocation
    exploreHedger = a;
elseif strcmp(method,'wires')
    % design the network
    minState = 0;
    maxState = max(q+param.Vmax/param.dt)/min(d);
    S1 = 10; S2 = 2*param.discQ; % S2 is 2*... since qe output a and q(a)!
    net = newff([minState maxState],[S1 S2],{'logsig' 'logsig'},'traingdx');
    % net parameters
    net.performFcn = 'sse';        % Sum-Squared Error performance function
    net.trainParam.goal = 0.1;     % Sum-squared error goal
    net.trainParam.show = 20;      % Frequency of progress displays (in epochs)
    net.trainParam.epochs = 5000;  % Maximum number of epochs to train
    net.trainParam.mc = 0.95;      % Momentum constant
elseif strcmp(method,'svm')
    % all bets on Diego again and again.
    % TOO MUCH PRESSURE.
end
% start loop
for t = 1 : param.tEnd
    % Progress echo once every param.yearEcho years of simulated days.
    if ~mod(t,param.yearEcho*param.nDays), disp(strcat('Year_',num2str(t/param.nDays))), end
    if t == 1
        dimL(t) = abs((q(t) + V(t)/param.dt)) / d(t); % just to get the state at time t=1;
        iState = sum(dimL(t) >= param.stateV); % bin index of the initial state
        flag = 0;
    end
    % determine feasible allocation space: cannot release more than is
    % available (storage drawdown plus inflow) nor more than demanded
    availH2O = V(t) / param.dt + q(t);
    if availH2O > d(t)
        aFeasible = d(t);
    else
        aFeasible = availH2O;
    end
    %aFeasible = V(t) / param.dt + q(t);
    if strcmp(method,'disc')
        % Map the feasible allocation onto an action-bin index, clamped to
        % [1, length(param.actionV)-1] so indexing below stays valid.
        aFCat = sum(aFeasible>=param.actionV);
        if ~aFCat
            aFCat = 1;
        elseif length(param.actionV)==aFCat
            aFCat=aFCat-1;
        end
    end
    % Now the actions
    if strcmp(method,'disc')
        if strcmp(ee,'undirE')
            % undirected exploration
            [a,flag] = undirE(param, Q, aFeasible, iState, aFCat, a, t,1);
            %keyboard
        elseif strcmp(ee,'recencyE')
            % directed exploration: recency-based exploration
            % keyboard
            [a,flag] = recencyE(param, Q, a, t, iState, aFCat);
            % keyboard
        end
    elseif strcmp(method,'hedger')
        % predict optimal action
        pred_action = find_action(hedger.X,hedger.Y,dimL(t),hedger.h);
        % disp(sprintf('t=%d, aFeasible: %f, predicted action: %f',t,aFeasible,pred_action))
        % keyboard
        % if or(size(pred_action,1) == 0,pred_action <= 0)
        if isempty(pred_action)
            % if returns empty array then we must explore
            a(t) = rand * aFeasible;
            exploreHedger(t) = 1;
        elseif pred_action > aFeasible
            % reconcile this with feasible action space
            a(t) = aFeasible;
            exploreHedger(t) = 0;
        else
            % go with prediction
            a(t) = pred_action;
            exploreHedger(t) = 0;
        end
    elseif strcmp(method,'wires')
        aq = sim(net,dimL(t));
        aq = aq * aFeasible;
        % First discQ rows of aq are candidate actions, second discQ rows
        % their values; the short logical index selects the action whose
        % value is maximal (relies on MATLAB allowing a logical index
        % shorter than the array). NOTE(review): fails if the max is not
        % unique — a(t) would get multiple elements.
        a(t) = aq(aq(param.discQ+1:end,:)==max(aq(param.discQ+1:end,:)));
    end
    % calculate reward
    %r(t) = a(t) - d(t); % max(r) is always zero. Diego idea: Include LOSS!
    %r(t) = a(t) - d(t) - l(t); % this reward just learns to consume the
    %demanded quantity and thus learns the simple rule 'consume everything
    %available but not more than the actual demand'.
    %r(t) = V(t)/param.dt - (a(t) - d(t)) - l(t); % this reward function actually leads to conservation!
    %r(t) = h(t) - (a(t) - d(t)) - l(t); % this reward function actually leads to conservation!
    %r(t) = V(t) / (a(t)-d(t)+1);
    %r(t) = V(t)/param.Vmax - (a(t) - d(t))./max(d) ; % this reward function actually leads to conservation!
    %r(t) = V(t)/param.Vmax - (a(t) - d(t));
    r(t) = - abs(a(t) - d(t)); % penalize deviation from demand in either direction
    % update state
    if ~(t==param.tEnd)
        [V(t+1),h(t+1),l(t+1)] = watBalRes(V(t),q(t),a(t),l(t),param,1);
        %[V(t+1),l(t+1)] = watBalRes(V(t),q(t),a(t),param);
        dimL(t+1) = abs((q(t+1) + V(t+1)/param.dt)) / d(t+1); % new state.
        if or(isinf(dimL(t+1)),isnan(dimL(t+1)))
            dimL(t+1) = param.maxDimL; % in case dimL turns all weird we use max attainable dimL.
        end
        if strcmp(method,'disc')
            % update Q: standard one-step Q-learning backup
            % Q(s,a) <- (1-lr)*Q(s,a) + lr*(r + delta*max_a' Q(s',a'))
            %if or(~flag,strcmp(ee,'recencyE'))
            %    iAMax = sum(Q(iState,1:aFCat) == max(Q(iState,1:aFCat)));
            % iAMax = find(Q(iState,1:aFCat) == max(Q(iState,1:aFCat)));
            %end
            iStateNew = sum(dimL(t+1) >= param.stateV);
            %keyboard
            aCat = sum(a(t)>=param.actionV);
            Q(iState,aCat) = (1-param.learnR) * Q(iState,aCat) + ...
                param.learnR * (r(t) + param.delta * full(max(Q(iStateNew,:))));
            % Q(iState,iAMax) = (1-param.learnR) * Q(iState,iAMax) + ...
            %    param.learnR * (r(t) + param.delta * full(max(Q(iStateNew,:))));
            param.exploreP = param.exploreP / param.decreaseP; % decrease the probability of exploration.
            if param.exploreP < param.minExpP
                param.exploreP = param.minExpP; % just ensure a minimal amount of exploration.
            end
            iState = iStateNew;
        elseif strcmp(method,'hedger')
            % update Q
            [hedger.X,hedger.Y] = update(hedger.X,hedger.Y,dimL(t),a(t),r(t),dimL(t+1),hedger.h,hedger.alpha,hedger.gamma);
        elseif strcmp(method,'wires')
            % train net
            % NOTE(review): P, T and Pi are never defined in this function,
            % so this call errors at runtime; it also clobbers the action
            % vector 'a' with adapt's second output. Left as-is pending the
            % planned refactor of the 'wires' method.
            [net,a,e,pf] = adapt(net,P,T,Pi);
        end
    end
end
% just some graphical output
h = resVH(V,param.aCoeff,param.bCoeff,'vh'); % recompute h over the full trajectory
figure;
subplot(1,2,1);
plot(a,'g'); hold on; plot(d,'r'); plot(h,'k'); plot(q,'b');hold off; xlabel('time');
%semilogy(a,'g'); hold on; semilogy(q,'b'); semilogy(d,'r'); semilogy(h,'k'); hold off; xlabel('time');
subplot(1,2,2);
plot(dimL,'c'),xlabel('time'),ylabel('state (dimL)');
% return values
out.param = param;
if strcmp(method,'disc'),
    out.Q = Q;
elseif strcmp(method,'hedger')
    out.exploreH = exploreHedger;
end
% return variables
out.a = a; out.r = r; out.l = l; out.V = V; out.dimL = dimL; out.h = h;
%keyboard