function out = resLearn(param,d,q,method)
% out = resLearn(param,d,q,method)
% Reinforcement learning of a reservoir release policy.
%   param  - parameter struct (see description in paramSetup)
%   d      - demand time series (length >= param.tEnd)
%   q      - inflow time series (length >= param.tEnd)
%   method - 'disc'   tabular Q-learning on a discretized state/action grid
%            'hedger' kernel-based HEDGER algorithm (../learning/hedger)
%            'wires'  neural-network approximation (unfinished)
%            'svm'    not implemented
% Returns struct 'out' with simulated trajectories (a, r, l, V, dimL),
% the parameter struct, and the learned representation (Q or exploreH).
% see also description in paramSetup.
% Tobias Siegfried, Diego Pontoriero, 04/16/2007
%
% NOTE(review): unresolved SVN conflict markers (.mine vs .r89) were
% resolved in favor of the .mine revision, which supersedes .r89
% (aFCat clamping, exploreHedger bookkeeping, isempty() prediction check).

% initialize non-specific parameters
rand('seed',1);          % fixed seed for reproducible exploration
V = zeros(param.tEnd,1); % reservoir volume
a = V;                   % action (release)
l = a;                   % losses
r = a;                   % reward
dimL = a;                % dimensionless state
h = a;                   % water level
% initial values
V(1) = param.V0; h(1) = resVH(V(1),param.aCoeff,param.bCoeff,'vh');

% method-specific setup
if strcmp(method,'disc')
    param.stateV = 0 : param.discS : param.maxDimL;
    param.actionV = 0 : param.discQ : max(d);
    param.exploreP = param.explorePIni;
    Q = sparse(param.maxDimL/param.discS+1,round(max(d)/param.discQ));
elseif strcmp(method,'hedger')
    % hack-ish stuff
    warning off all;
    % hedger algorithm
    addpath('../learning/hedger');
    hedger.X = [];
    hedger.Y = [];
    hedger.h = 1.0;      % kernel bandwidth
    hedger.alpha = 0.1;  % learning rate
    hedger.gamma = 0.03; % discount factor
    % looks like we need some of these from the 'disc' method.
    % *** will refactor code substantially later
    param.stateV = 0 : param.discS : param.maxDimL;
    param.actionV = 0 : param.discQ : max(d);
    % preallocation: 1 where the step explored, 0 where it exploited
    exploreHedger = a;
elseif strcmp(method,'wires')
    % design the network
    minState = 0;
    maxState = max(q+param.Vmax/param.dt)/min(d);
    S1 = 10; S2 = param.discQ;
    net = newff([minState maxState],[S1 S2],{'logsig' 'logsig'},'traingdx');
    % net parameters
    net.performFcn = 'sse';        % Sum-Squared Error performance function
    net.trainParam.goal = 0.1;     % Sum-squared error goal
    net.trainParam.show = 20;      % Frequency of progress displays (in epochs)
    net.trainParam.epochs = 5000;  % Maximum number of epochs to train
    net.trainParam.mc = 0.95;      % Momentum constant
elseif strcmp(method,'svm')
    % not implemented yet
end

% main simulation / learning loop
for t = 1 : param.tEnd
    if ~mod(t,param.yearEcho*param.nDays), disp(strcat('Year_',num2str(t/param.nDays))), end
    if t == 1
        dimL(t) = abs((q(t) + V(t)/param.dt)) / d(t); % just to get the state at time t=1;
    end

    % determine feasible allocation space: cannot release more than is
    % available (storage per time step plus inflow), nor more than demand
    availH2O = V(t) / param.dt + q(t);
    if availH2O > d(t)
        aFeasible = d(t);
    else
        aFeasible = availH2O;
    end

    % index of the largest discrete action below aFeasible, clamped into
    % the valid column range of Q (at least 1, at most length(actionV)-1)
    aFCat = sum(aFeasible>param.actionV);
    if ~aFCat
        aFCat = 1;
    elseif length(param.actionV)==aFCat
        aFCat=aFCat-1;
    end

    % choose the action
    if strcmp(method,'disc')
        if rand < param.exploreP
            % explore
            a(t) = rand * aFeasible; flag = 0;
        else
            % exploit: search for action with max(Q) given state
            iState = sum(dimL(t) >= param.stateV);
            te = full(Q(iState,1:aFCat)); % faster than indexing the sparse row twice
            iAMax = find(te == max(te));
            iAMax = max(iAMax); % in case of nonuniqueness
            a(t) = param.actionV(iAMax) + ceil(rand*param.discQ);
            if a(t) > aFeasible, a(t) = aFeasible; end
            flag = 1;
        end
    elseif strcmp(method,'hedger')
        % predict optimal action from the training set gathered so far
        pred_action = find_action(hedger.X,hedger.Y,dimL(t),hedger.h);
        if isempty(pred_action)
            % no prediction available yet -> explore
            a(t) = rand * aFeasible;
            exploreHedger(t) = 1;
        elseif pred_action > aFeasible
            % reconcile prediction with the feasible action space
            a(t) = aFeasible;
            exploreHedger(t) = 0;
        else
            % go with prediction
            a(t) = pred_action;
            exploreHedger(t) = 0;
        end
    elseif strcmp(method,'wires') % was 'wire': typo made this branch unreachable
        keyboard % unfinished branch -- deliberately drops into the debugger
        aq = sim(net,dimL(t));
        aq = aq * aFeasible;
    end

    % calculate reward; max(r) is always zero. Diego idea: Include LOSS!
    r(t) = a(t) - d(t) - l(t);

    % update state and the learned representation
    if ~(t==param.tEnd)
        [V(t+1),h(t+1),l(t+1)] = watBalRes(V(t),q(t),a(t),param);
        dimL(t+1) = abs((q(t+1) + V(t+1)/param.dt)) / d(t+1); % new state
        if or(isinf(dimL(t+1)),isnan(dimL(t+1)))
            dimL(t+1) = max(q+param.Vmax/param.dt)/min(d); % in case dimL turns all weird we use max attainable dimL.
        end
        if strcmp(method,'disc')
            % Q-learning update
            if ~flag
                % explored this step: recover the state/action indices
                iState = sum(dimL(t) >= param.stateV);
                iAMax = find(Q(iState,1:aFCat) == max(Q(iState,1:aFCat)));
            end
            iStateNew = sum(dimL(t+1) >= param.stateV);
            Q(iState,iAMax) = (1-param.learnR) * Q(iState,iAMax) + ...
                param.learnR * (r(t) + param.delta * full(max(Q(iStateNew,:))));
            param.exploreP = param.exploreP / param.decreaseP; % decrease the probability of exploration.
        elseif strcmp(method,'hedger')
            % HEDGER update of the training set
            [hedger.X,hedger.Y] = update(hedger.X,hedger.Y,dimL(t),a(t),r(t),dimL(t+1),hedger.h,hedger.alpha,hedger.gamma);
        elseif strcmp(method,'wires')
            % TODO: network training update (not implemented)
        end
    end
end

% just some graphical output
figure;
subplot(1,2,1);
plot(q,'b'), hold on; plot(d,'r'); plot(h,'k'); plot(a,'g');hold off; xlabel('time');
subplot(1,2,2);
semilogy(dimL,'c'),xlabel('time'),ylabel('state (dimL)');

% return values
out.param = param;
if strcmp(method,'disc')
    out.Q = Q;
elseif strcmp(method,'hedger')
    out.exploreH = exploreHedger;
end
out.a = a; out.r = r; out.l = l; out.V = V; out.dimL = dimL;