function out = learnGP(param,d,q,method,ee,in)
% out = learnGP(param,d,q,method,ee,in)
% Multi-agent reservoir-operation learning driver. Two methods:
%   'disc' - tabular Q-learning on discretized state/action spaces, with
%            exploration scheme selected via ee:
%            'undirE' (undirected), 'BE' (Boltzmann), 'recencyE' (recency-based);
%   'GP'   - GPSARSA, a try with Gaussian Processes (see Y. Engel (2005)).
% Inputs:
%   param  - configuration struct; see also description in paramSetup.
%   d      - demand series, tEnd x nAgents
%   q      - recharge series, tEnd x nAgents
%   method - 'disc' or 'GP'
%   ee     - exploration scheme (used with 'disc' only)
%   in     - OPTIONAL struct holding a previous run's model (in.Q, and
%            optionally in.h/in.V/in.l/in.dimL end states); when given,
%            learning resumes from it with exploration at its minimum.
% Output:
%   out    - struct: param, Q (disc) or gp (GP), and trajectories
%            a, r, l, V, dimL, h, qTB, csR, plus final learnR.
% sample call: tt = learnGP(param,param.dAg,param.q,'disc','undirE');
%
% Tobias Siegfried, 05/15/2007
% resuming work, 06/04/2008
% latest: 10/04/08
%% Initialize non-specific parameters
V = zeros(param.tEnd,param.nAgents);          % reservoir volume per time step/agent
a = V; % action (release/allocation)
l = zeros(param.tEnd,param.nAgents + 1); qTB = l; % losses and transborder flow (extra column for downstream spill)
r = a; rAcc = a; dimL = a; h = a;             % reward, accumulated reward, dimensionless level (state), stage
csR = zeros(param.years,param.nAgents);       % annual crop-success reward
%param.stateV = cell(param.nAgents,1);
%param.actionV = param.stateV;
param.learnR0 = param.learnR;                 % remember initial learning rate for annealing below
%% Do specific stuff
if strcmp(method,'disc')
    maxD = max(d);
    if param.logDimL
        % log-scaled state discretization between minDimL and maxDimL
        for n = 1 : param.nAgents
            param.stateV{n} = param.minDimL(n) : (param.maxDimL(n) - param.minDimL(n)) / param.discS(n) : param.maxDimL(n);
            param.actionV{n} = ...
                (0 : (maxD(n))/param.discQ(n) : maxD(n))';
        end
    else
        % NOTE(review): this recomputes maxD from param.d while the branch
        % above used the argument d - confirm which is intended.
        maxD = max(param.d);
        for n = 1 : param.nAgents % discretizing states and actions
            param.stateV{n} = (0 : (param.maxDimL(n)/param.discS(n)) : param.maxDimL(n))';
            param.actionV{n} = ...
                (0 : maxD(n)/param.discQ(n) : maxD(n))';
        end
    end
    if strcmp(ee,'BE')
        % Boltzmann exploration keeps per-state visit counters
        N=cell(param.nAgents,1);
        for n = 1 : param.nAgents
            N{n}=zeros(length(param.stateV{n}),1);
        end
    end
    if nargin < 6 % previous runs nonexisting
        flagPrev = 0;
        if param.keepRS
            rand('seed',param.randS); % legacy seeding for reproducibility
        end
        Q = param.actionV;
        for n = 1 : param.nAgents
            % Q-table: states x actions, initialized to zero
            if param.hydroGuy(n)
                Q{n} = zeros(length(param.stateV{n}),length(param.actionV{n}));%- 99;
            else
                Q{n} = zeros(length(param.stateV{n}),length(param.actionV{n}));
            end

            param.exploreP(n) = param.explorePIni(n);
            if strcmp(ee,'recencyE')
                Qp = Q; % recency exploration keeps a copy of the table
            end
        end
    elseif nargin == 6 % use learned model from previous runs
        flagPrev = 1;
        if param.keepRS, rand('seed',param.randS); end
        if ~isstruct(in)
            error('in must contain a structure with previous model results!');
        end
        Q = in.Q;
        if param.reuseOldStateV
            % continue from the previous run's final states
            h(1,:) = in.h(end,:); V(1,:) = in.V(end,:); l(1,:) = in.l(end,:); dimL(1,:) = in.dimL(end,:);
        end
        for n = 1 : param.nAgents
            param.exploreP(n) = param.minExpP(n); % model is learned - explore minimally
        end
    end
end
if strcmp(method,'GP')
    % GPSARSA learning - initalize state and action vectors
    for n = 1 : param.nAgents
        param.stateV{n} = zeros(param.tEnd,param.stateVDim);
        if param.simpleGPState
            % NOTE(review): 't' is not defined at this point (the time loop
            % starts below), so this branch errors if reached - confirm intent.
            param.stateV{n}(t) = log(abs((q(t,n) + qTB(t+1,n) - param.tBFlow * l(t,n) + V(t,n)/param.dt)) / d(t,n) * (mod(t,12)+1));
        else
            param.stateV{n}(:,1) = (1:param.tEnd)'; % stateVec(1): time
            param.stateV{n}(1,1) = log(param.V0(n)); % stateVec(2): res. level
            param.stateV{n}(:,3) = param.q(:,n); % stateVec(3): recharge
            % stateVec(4): transborder flow - assumed to be zero at the beginning of the simulation
            % stateVec(5): reservoir losses - assumed to be zero at the beginning of the simulation
            param.stateV{n}(:,6) = param.d(:,n); % stateVec(6): water demand
            param.actionV{n} = zeros(param.tEnd,1);
        end
    end
    gp = cell(param.nAgents,1);
    for i = 1 : param.nAgents
        gp{i}.currDSize = 1; % current GP dictionary size
    end
end
%% start loop
for t = 1 : param.tEnd
    for n = 1 : param.nAgents
        % progress echo once per agent sweep, every yearEcho years
        if ~mod(t,param.yearEcho*param.nDays) && ~ mod(n,param.nAgents)
            disp(strcat('Year_',num2str(t/param.nDays)))
        end
        % First time step
        if t == 1
            if strcmp(method,'disc')
                if ~ flagPrev
                    V(1,n) = param.V0(n);
                    h(1,n) = resVH(V(1,n),param.aCoeff(n),param.bCoeff(n),'vh');
                end
                if param.logDimL
                    dimL(t,n) = log(abs((q(t,n) + qTB(t+1,n) - param.tBFlow * l(t,n) + V(t,n)/param.dt)) / d(t,n) * (mod(t,12)+1)); % just to get the state at time t=1;
                else
                    dimL(t,n) = abs((q(t,n) + qTB(t+1,n) + param.tBFlow * l(t,n) + V(t,n)/param.dt)) / d(t,n) * (mod(t,12)+1);
                end
                % map continuous state to bin index; clamp to 1 so the
                % Q-table indexing below can never hit index 0
                iState(n) = max(sum(dimL(t,n) >= param.stateV{n}),1);
                flag = 0;
            end

            %if strcmp(method,'GP')
            %    gp.a{n}(1) = 1;
            %    gp.alphaTilde{1,n} = 0;
            %    gp.cTilde{1,n} = 0;
            %    gp.CTilde{1,n} = 0
            %    gp.d{n}(1) = 0;
            %    1/s{n}(0) = 0, take care of 1/s{n}(0) with if conditions in the algo!
            %    keyboard % debug GP
            %end
        end
        %% Feasible allocation space
        % release is limited either by available volume or by the per-agent
        % release constraint (index (n) added - the else-branch already used it)
        if V(t,n)/param.dt <= param.resReleaseConstr(n)
            aFeasible = V(t,n) / param.dt + q(t,n) + param.tBFlow * qTB(t,n);
        else
            aFeasible = param.resReleaseConstr(n) + q(t,n) + param.tBFlow * qTB(t,n);
        end
        %% Actions
        if strcmp(method,'disc')
            % Constrain actions: largest discretized action not exceeding
            % aFeasible, clamped into [1, numActions-1]
            aFCat = sum(aFeasible>=param.actionV{n});
            if ~aFCat
                aFCat = 1;
            elseif length(param.actionV{n})==aFCat
                aFCat=aFCat-1;
            end
            if strcmp(ee,'undirE') && param.learn
                % undirected exploration
                [a(:,n),flag,param] = undirE(param, Q{n}, aFeasible, iState(n), aFCat, a(:,n), t, n);
            elseif strcmp(ee,'BE') && param.learn
                % Boltzmann exploration
                [a(:,n),flag,param,N{n}] = BoltzmannE(param, Q{n}, aFeasible, iState(n), aFCat, a(:,n), t, n, N{n});
            elseif strcmp(ee,'recencyE') && param.learn
                % directed exploration: recency-based exploration
                [a,flag] = recencyE(param, Q{n}, a, t, iState, aFCat,n);
            elseif ~param.learn
                % no learning: simply try to meet demand, capped by volume
                if V(t,n)/param.dt > d(t,n), a(t,n) = d(t,n); else a(t,n) = V(t,n)/param.dt; end
            end
        end

        if strcmp(method,'GP')
            % greedy - for the time being only valid for the particular
            % NOTE(review): both branches currently pick a uniform random
            % feasible action; the greedy kernel selection is stubbed out.
            if param.epsilon <= rand
                % linear kernel selection
                %sens = param.stateV{n}(t,:)';
                %Q = computeQ(param,gp{n},sens,aFeasible);
                a(t,n) = rand * aFeasible;
            else
                % random
                a(t,n) = rand * aFeasible;
                %param.epsilon = param.epsilon / param.decreaseP(n);
            end
            % param.epsilon - update epsilon with a reward thingy. example:
            % decrease epsilon quicker when reward (e.g. annual) is
            % improved...
        end
        %% Reward
        if ~param.hydroGuy(n)
            % irrigator: penalize deviation from demand; harvest-time reward
            % from cumulative yield response over the past growing period
            rAcc(t,n) = - abs(a(t,n) - d(t,n));
            if ~mod(t,param.harvestTime)
                csR(ceil(t/param.nDays),n) = min(min(1 - param.yieldResponseFactor * ...
                    (1 - cumsum(a(t - param.harvestTime + 1 : t,n))./cumsum(d(t - param.harvestTime + 1 : t,n)))),1);
                r(t,n) = csR(ceil(t/param.nDays),n) * param.maxCropP;
            else
                r(t,n)=0;
            end
        else % Hydropower producer
            r(t,n) = - abs(a(t,n) - d(t,n));
        end
        %% Update state
        if ~(t==param.tEnd)
            % Water balance
            [V(t+1,n),h(t+1,n),l(t,n)] = watBalRes(V(t,n),q(t,n),a(t,n),qTB(t,n),param,n);

            % route losses (and releases, for hydropower) downstream
            if param.hydroGuy(n) %&& n < param.nAgents
                qTB(t+1,n+1) = qTB(t+1,n+1) + l(t,n) + a(t,n);
            elseif ~param.hydroGuy(n) %&& n < param.nAgents
                qTB(t+1,n+1) = qTB(t+1,n+1) + l(t,n);
            end

            if strcmp(method,'disc')
                % NOTE(review): l(t+1,n) is still zero here (computed next
                % step) - confirm this is intended in the new-state formulas.
                if param.logDimL
                    dimL(t+1,n) = log(abs((q(t+1,n) + qTB(t+1,n) - param.tBFlow * l(t+1,n) + V(t+1,n)/param.dt)) / d(t+1,n) * (mod(t,12)+1)); % new state.
                else
                    if ~param.circular && n == 1
                        dimL(t+1,n) = abs((q(t+1,n) + qTB(t+1,n) + param.tBFlow * l(t+1,n) + V(t+1,n)/param.dt)) / d(t+1,n) * (mod(t,12)+1); % new state.
                    elseif param.circular && n==1
                        dimL(t+1,n) = abs((q(t+1,n) + qTB(t+1,n) + param.tBFlow * (l(t+1,n)+l(t,end)) + V(t+1,n)/param.dt)) / d(t+1,n); % new state.
                    else
                        dimL(t+1,n) = abs((q(t+1,n) + qTB(t+1,n) + param.tBFlow * l(t+1,n) + V(t+1,n)/param.dt)) / d(t+1,n); % new state.
                    end
                end
                if or(isinf(dimL(t+1,n)),isnan(dimL(t+1,n)))
                    dimL(t+1,n) = param.maxDimL(n); % in case dimL turns all weird we use max attainable dimL.
                end
            end

            if strcmp(method,'GP')
                param.stateV{n}(t+1,1) = mod(t+1,param.nDays);
                param.stateV{n}(t+1,2) = log(V(t+1,n));
                param.stateV{n}(t+1,4) = qTB(t+1,n); % check!
                param.stateV{n}(t,5) = l(t,n); % check!
            end
            %% update Q
            if strcmp(method,'disc') && param.learn
                % clamp new-state bin to 1 as well (cf. iState above)
                iStateNew(n) = max(sum(dimL(t+1,n) >= param.stateV{n}),1);
                aCat = sum(a(t,n) >= param.actionV{n});
                Q{n}(iState(n),aCat) = (1-param.learnR(n)) * Q{n}(iState(n),aCat) + ...
                    param.learnR(n) * (r(t,n) + param.gamma * max(Q{n}(iStateNew(n),:))); % Q-Learning
                param.exploreP(n) = param.exploreP(n) / param.decreaseP(n); % decrease the probability of exploration.
                param.learnR(n) = param.learnR0(n) / (1 + param.learnRBeta * t / (size(Q{n},1).*size(Q{n},2))); % learning rate annealing
                if param.exploreP(n) < param.minExpP(n)
                    param.exploreP(n) = param.minExpP(n); % just ensure a minimal amount of exploration.
                end
                iState = iStateNew;
            end

            if strcmp(method,'GP')
                % here, then, finally comes GPSARSA!
                if t==1
                    % init
                    sens = param.stateV{n}(t,:)';
                    gp{n} = startEpisode(param,gp{n},sens,a(t,n),n);
                else
                    % SARSA transition (s,a,r,s',a')
                    sens = param.stateV{n}(t-1,:)';
                    sensN = param.stateV{n}(t,:)';
                    act = a(t-1,n); 
                    actN = a(t,n);
                    reward = r(t,n);
                    [param,gp{n}] = gpSARSA2(param,gp{n},sens,act,reward,sensN,actN,n,t);
                end

            end
        end
    end
%     if ~mod(t,5)
%         for ta = 1 : 100
%             Q(ta) = computeQ(param,gp{1},sens,ta,1);
%         end
%         plot(Q), keyboard
%     end
end
%% just some graphical output
if param.plotRes
    %     for n = 1 : param.nAgents
    %         h(:,n) = resVH(V(:,n),param.aCoeff(n),param.bCoeff(n),'vh');
    %         figure(n); subplot(2,1,1); title(['Agent_' num2str(n)]);
    %         plot(d(:,n),'r'); hold on; plot(a(:,n),'g'); plot(h(:,n),'k'); plot(q(:,n),'b');
    %         plot(qTB(:,n),'--b');hold off; xlabel('time (years)');
    %         legend('d_{i}(t)','a_{i}(t)','h_{i}(t)','q_{i}(t)','qTB_{i}(t)');
    %         drawnow, datetick('x',11)
    %         subplot(2,1,2); semilogy(dimL(:,n),'c'),xlabel('time (years)'),ylabel('\sigma_{i}(t)');
    %         drawnow, datetick('x',11)
    %     end

    %for n = 1 : param.nAgents
    % NOTE(review): 'n' here is the leftover loop index (= nAgents), while
    % the plots below hard-code agents 1 and 2 - confirm intent.
    h(:,n) = resVH(V(:,n),param.aCoeff(n),param.bCoeff(n),'vh');
    figure(1);
    subplot(2,1,1); title(['Agent_' num2str(n)]);
    plot(d(:,1),'r'); hold on; plot(a(:,1),'g'); plot(h(:,1),'k'); plot(q(:,1),'b');
    plot(qTB(:,1),'--b');hold off; xlabel('time (years)');
    legend('d_{1}(t)','a_{1}(t)','h_{1}(t)','q_{1}(t)','qTB_{1}(t)');
    drawnow, %datetick('x',11)
    subplot(2,1,2);
    plot(d(:,2),'r'); hold on; plot(a(:,2),'g'); plot(h(:,2),'k'); plot(q(:,2),'b');
    plot(qTB(:,2),'--b');hold off; xlabel('time (years)');
    legend('d_{2}(t)','a_{2}(t)','h_{2}(t)','q_{2}(t)','qTB_{2}(t)');
    drawnow, %datetick('x',4)
    %end
end

%keyboard

%% return values
out.param = param;
if strcmp(method,'disc')
    out.Q = Q;
end
%% return variables
out.a = a; out.r = r; out.l = l; out.V = V; out.dimL = dimL; out.h = h;  out.qTB = qTB; out.csR = csR;
out.learnR = param.learnR;
if strcmp(method,'GP')
    out.gp = gp;
end