function out = learnGP1(p,d,q,method,ee,in)
% out = learnGP1(p,d,q,method,ee,in)
%
% Reinforcement learning of reservoir operation for a chain of p.nAgents
% agents, using either a discretized tabular Q-learning scheme ('disc') or
% Gaussian-process SARSA ('GP'; see Y. Engel (2005)).
%
% Inputs:
%   p      - parameter struct (see pSetup for the field descriptions)
%   d      - water demand series, indexed d(t,n) for t = 1:p.tEnd
%   q      - recharge/inflow series, indexed q(t,n)
%   method - 'disc' (tabular Q-learning on discretized states/actions)
%            or 'GP' (GPSARSA)
%   ee     - exploration scheme used with 'disc': 'undirE' (undirected),
%            'BE' (Boltzmann) or 'recencyE' (recency-based)
%   in     - optional struct holding results of a previous run (fields
%            Q, h, V, l, dimL are read); when supplied (nargin == 6),
%            learning resumes from the stored model
%
% Output struct out with fields:
%   p, a (actions), r (rewards), l (losses), V (volumes), dimL
%   (dimensionless level state), h (stage), qTB (transboundary flow),
%   csR (per-year harvest reward), learnR, and additionally Q (for
%   'disc') or gp (for 'GP').
%
% see also description in pSetup.
% a try with Gaussian Processes (see Y. Engel (2005))
%
% Tobias Siegfried, 05/15/2007
% resuming work, 06/04/2008
% latest: 10/04/08
% debugging: 10/29/08
% Now it just occured to me that the agent should learn how to operate his
% reservoir, that is, how to keep the filling given future uncertainty and
% not how much to take given the demand.
%% Initialize non-specific parameters
V = zeros(p.tEnd,p.nAgents); % reservoir volume per time step and agent
a = V; % action
% l: losses; qTB: transboundary flow. One extra column so that agent
% p.nAgents can still write to column n+1 in the routing step below.
l = zeros(p.tEnd,p.nAgents + 1); qTB = l;
% r: instantaneous reward, rAcc: accumulated shortfall, dimL: dimensionless
% level (the 'disc' state variable), h: stage/head
r = a; rAcc = a; dimL = a; h = a;
csR = zeros(p.years,p.nAgents); % yearly harvest-based reward (crop agents)
p.learnR0 = p.learnR; % keep the initial learning rate (annealing reference)
%% Do specific stuff
if strcmp(method,'disc')
    maxD = max(d); % per-agent maximum demand spans the action grids
    if p.logDimL
        % log-scaled dimensionless-level state grid
        for n = 1 : p.nAgents
            % NOTE(review): row vector here vs transposed (column) grid in
            % the else-branch below -- confirm both are consumed the same way
            p.stateV{n} = p.minDimL(n) : (p.maxDimL(n) - p.minDimL(n)) / p.discS(n) : p.maxDimL(n);
            p.actionV{n} = (0 : (maxD(n))/p.discQ(n) : maxD(n))';
        end
    else
        % NOTE(review): uses p.d here whereas the log branch used the
        % argument d (L23) -- confirm they are meant to be the same data
        maxD = max(p.d);
        for n = 1 : p.nAgents % discretizing states and actions
            p.stateV{n} = (0 : (p.maxDimL(n)/p.discS(n)) : p.maxDimL(n))';
            p.actionV{n} = (0 : maxD(n)/p.discQ(n) : maxD(n))';
        end
    end
    if strcmp(ee,'BE')
        % Boltzmann exploration keeps a per-state visit counter per agent
        N=cell(p.nAgents,1);
        for n = 1 : p.nAgents
            N{n}=zeros(length(p.stateV{n}),1);
        end
    end
    if nargin < 6 % previous runs nonexisting
        flagPrev = 0;
        if p.keepRS
            % legacy RNG seeding ('seed' syntax is deprecated in newer MATLAB)
            rand('seed',p.randS);
        end
        Q = p.actionV; % borrow the cell layout; contents overwritten below
        for n = 1 : p.nAgents
            if p.hydroGuy(n)
                Q{n} = zeros(length(p.stateV{n}),length(p.actionV{n}));%,p.nDays);%- 99;
            else
                % currently identical to the hydro branch above
                Q{n} = zeros(length(p.stateV{n}),length(p.actionV{n}));
            end

            p.exploreP(n) = p.explorePIni(n); % start at initial exploration probability
            if strcmp(ee,'recencyE')
                Qp = Q; % NOTE(review): Qp is never read below -- confirm it is needed
            end
        end
    elseif nargin == 6 % use learned model from previous runs
        flagPrev = 1;
        if p.keepRS, rand('seed',p.randS); end
        if ~isstruct(in)
            error('in must contain a structure with previous model results!');
        end
        Q = in.Q; % resume with the previously learned Q tables
        if p.reuseOldStateV
            % continue from the terminal state of the previous run
            h(1,:) = in.h(end,:); V(1,:) = in.V(end,:); l(1,:) = in.l(end,:); dimL(1,:) = in.dimL(end,:);
        end
        for n = 1 : p.nAgents
            p.exploreP(n) = p.minExpP(n); % mostly exploit: exploration at its floor
        end
    end
end
if strcmp(method,'GP')
    % GPSARSA learning - initalize state and action vectors
    for n = 1 : p.nAgents
        if p.simpleGPState
            % scalar state: demand-normalized available water at t = 1
            t=1;
            p.stateV{n}(t) = (abs((q(t,n) + qTB(t+1,n) - p.tBFlow * l(t,n) + V(t,n)/p.dt)) / d(t,n));% * (mod(t,p.nDays)+1));
            p.actionV{n} = zeros(p.tEnd,1);
            %keyboard
        else
            % 6-dimensional state vector, one row per time step
            p.stateV{n}(:,1) = mod(1:p.tEnd,p.nDays)' + 1; % stateVec(1): time - use mod(t,dt) here!
            %p.stateV{n}(1,2) = log(p.V0(n)); % stateVec(2): res. level
            p.stateV{n}(1,2) = p.V0(n) / p.dt; % stateVec(2): res. level
            p.stateV{n}(:,3) = p.q(:,n); % stateVec(3): recharge
            % stateVec(4): transborder flow - assumed to be zero at the beginning of the simulation
            % stateVec(5): reservoir losses - assumed to be zero at the beginning of the simulation
            p.stateV{n}(:,6) = p.d(:,n); % stateVec(6): water demand
            p.actionV{n} = zeros(p.tEnd,1);
        end
    end
    gp = cell(p.nAgents,1); % one GP model struct per agent
    for i = 1 : p.nAgents
        gp{i}.currDSize = 1; % initial dictionary size
    end
end
%% debug plotting ini
if p.debugOn
    figure(99)
end
%% start loop
for t = 1 : p.tEnd
    for n = 1 : p.nAgents
        % progress echo once per p.yearEcho years (only for the last agent)
        if ~mod(t,p.yearEcho*p.nDays) && ~ mod(n,p.nAgents)
            disp(strcat('Year_',num2str(t/p.nDays)))
        end
        % First time step
        if t == 1
            if strcmp(method,'disc')
                if ~ flagPrev
                    % fresh run: initialize volume and stage from V0
                    V(1,n) = p.V0(n);
                    h(1,n) = resVH(V(1,n),p.aCoeff(n),p.bCoeff(n),'vh');
                end
                if p.logDimL
                    dimL(t,n) = log(abs((q(t,n) + qTB(t+1,n) - p.tBFlow * l(t,n) + V(t,n)/p.dt)) / d(t,n)); % just to get the state at time t=1;
                else
                    % NOTE(review): sign of the p.tBFlow*l term differs from
                    % the log branch above (+ here, - there) -- confirm intended
                    dimL(t,n) = abs((q(t,n) + qTB(t+1,n) + p.tBFlow * l(t,n) + V(t,n)/p.dt)) / d(t,n);
                end
                iState(n) = sum(dimL(t,n) >= p.stateV{n}); % bin index of the initial state
                flag = 0;
            elseif strcmp(method,'GP')
                V(1,n) = p.V0(n);
                h(1,n) = resVH(V(1,n),p.aCoeff(n),p.bCoeff(n),'vh');
            end
        end
        %% Feasible allocation space
        % upper bound on the release: stored water (or the release
        % constraint, if binding) plus inflows
        % NOTE(review): p.resReleaseConstr is used unindexed in this
        % condition but indexed with (n) below -- confirm it is scalar
        if V(t,n)/p.dt <= p.resReleaseConstr
            aFeasible = V(t,n) / p.dt + q(t,n) + p.tBFlow * qTB(t,n);
        else
            aFeasible = p.resReleaseConstr(n) + q(t,n) + p.tBFlow * qTB(t,n);
        end
        %% Actions
        if strcmp(method,'disc')
            % Constrain actions
            aFCat = sum(aFeasible>=p.actionV{n}); % highest feasible action bin
            if ~aFCat
                aFCat = 1; % clamp to the lowest bin
            elseif length(p.actionV{n})==aFCat
                aFCat=aFCat-1; % stay strictly inside the grid
            end
            if strcmp(ee,'undirE') && p.learn % undirected exploration
                [a(:,n),flag,p] = undirE(p, Q{n}, aFeasible, iState(n), aFCat, a(:,n), t, n);
            elseif strcmp(ee,'BE') && p.learn % Boltzmann exploration
                [a(:,n),flag,p,N{n}] = BoltzmannE(p, Q{n}, aFeasible, iState(n), aFCat, a(:,n), t, n, N{n});
            elseif strcmp(ee,'recencyE') && p.learn % directed exploration: recency-based exploration
                [a,flag] = recencyE(p, Q{n}, a, t, iState, aFCat,n);
            elseif ~p.learn
                % no learning: simply serve demand if storage allows
                if V(t,n)/p.dt > d(t,n), a(t,n) = d(t,n); else a(t,n) = V(t,n)/p.dt; end
            end
        end

        if strcmp(method,'GP')
            % greedy - for the time being only valid for the particular
            if t>1 && aFeasible>0
                sta = p.stateV{n}(end,:)'; % current state (last stored row)
                %action = max(p.actionV{n});
                % candidate actions: p.actDisc+1 points spanning [0,aFeasible]
                action = (0:(aFeasible/p.actDisc):aFeasible)';
                % NOTE(review): local Q/Vari shadow the tabular Q name (GP
                % path only) and are preallocated one element short of
                % length(action); they grow inside the loop -- confirm
                Q = zeros(p.actDisc,1);
                Vari = Q;
                for i = 1 : length(action)
                    % GP posterior mean and variance of Q(state,action)
                    [Q(i),Vari(i)] = computeQ(p,gp{1},sta,action(i));
                end
                if p.debugOn
                    disp(['state: ' num2str(sta')])
                    figure(99), subplot(2,1,n)
                    plot(action,Q)
                    hold on
                    plot(action,Vari,'k')
                    hold off
                end
                %keyboard
                if p.epsilon <= rand % maximize
                    %disp('MAX');
                    maxA = Q == max(Q);
                    if length(find(maxA)) > 1
                        % NOTE(review): tie-break draws over ALL actions,
                        % not only the tied maximizers -- confirm intended
                        randA = rand(size(maxA));
                        maxA = (randA == max(randA));
                    end
                    a(t,n) = action(maxA);

                    %keyboard

                    %p.epsilon = p.epsilon / p.decreaseP(n);
                    if p.debugOn
                        disp(['MAXIMIZE: Agent: ' num2str(n) 'choose max Q at ',num2str(t), ' : a(t): ' num2str(a(t,n))])
                        hold on
                        subplot(2,1,n), plot(a(t,n),0,'r+')
                        hold off
                        %keyboard
                    end
                    %p.epsilon = p.epsilon / p.decreaseP(n);
                    p.maxEvents(t) = 1; % record a greedy (maximizing) step
                else % search
                    %disp('SER');
                    % explore: pick the action with maximal posterior variance
                    maxVari = Vari==max(Vari);
                    if length(find(maxVari))>1
                        % NOTE(review): same all-action tie-break as above
                        randVari = rand(size(maxVari));
                        maxVari = randVari==max(randVari);
                    end

                    %keyboard

                    a(t,n) = action(maxVari);
                    %p.epsilon = p.epsilon / p.decreaseP(n);
                    if p.debugOn
                        disp(['SEARCH: Agent: ' num2str(n) ' choose max Var at ',num2str(t), ' : a(t): ' num2str(a(t,n))])
                        hold on
                        subplot(2,1,n), plot(a(t,n),0,'r+')
                        hold off
                        %keyboard
                    end
                end
                %if abs((a(t,n) - d(t,n))/d(t,n)) <= .25
                %    disp('decrease P')
                %    p.epsilon = p.epsilon / p.decreaseP(n);
                %end
            else
                % t == 1 or nothing feasible: draw a uniform random action
                a(t,n) = rand * aFeasible;
                if p.debugOn
                    disp(['Agent: ' num2str(n) 'choose totally random at ',num2str(t), ' : a(t): ' num2str(a(t,n))])
                    hold on
                    subplot(2,1,n),plot(a(t,n), 0,'r+')
                    hold off
                    %keyboard
                end
            end

            p.actionV{n}(t) = a(t,n); % log chosen action
            % p.epsilon - update epsilon with a reward thingy. Example:
            % decrease epsilon quicker when reward (e.g. annual) is
            % improved...
        end
        %% Reward
        if ~p.hydroGuy(n)
            % irrigation/crop agent: penalize shortfall, pay out at harvest
            rAcc(t,n) = - abs(a(t,n) - d(t,n));
            if ~mod(t,p.harvestTime)
                % yield-response reward over the past harvest window, capped at 1
                csR(ceil(t/p.nDays),n) = min(min(1 - p.yieldResponseFactor * ...
                    (1 - cumsum(a(t - p.harvestTime + 1 : t,n))./cumsum(d(t - p.harvestTime + 1 : t,n)))),1);
                r(t,n) = csR(ceil(t/p.nDays),n) * p.maxCropP;
            else
                r(t,n)=0; % no reward between harvests
            end
        else % Hydropower producer
            %a(t,n)
            %r(t,n) = V(t,n) / p.Vmax(n);
            %
            %r(t,n) =   abs((a(t,n) - d(t,n))) + V(t,n) / (l(t,n) + 1) / p.dt;
            %r(t,n) = abs((a(t,n) - d(t,n)));
            %r(t,n) = a(t,n);
            %if t>20
            %    keyboard
            %end

            %if a(t,n) == 150, keyboard, end

            % within tolerance of demand: small (negative) shaped reward,
            % and epsilon is decayed; otherwise flat penalty of -1
            if abs((a(t,n) - d(t,n))/d(t,n)) <= p.precision
                r(t,n) =  - abs((a(t,n) - d(t,n) - l(t,n)) / aFeasible);
                %r(t,n) =  a(t,n);
                %r(t,n) =  sum(a(1:t,n));
                %keyboard
            %    r(t,n) =  - abs((a(t,n) - d(t,n) + V(t,n)/p.dt)/d(t,n));
                p.epsilon = p.epsilon / p.decreaseP(n);
            else
                %r(t,n) =  - abs((a(t,n) - d(t,n))^2/d(t,n));
                r(t,n) = -1;
            end
            %
            %if a(t,n) == d(t,n)
            %    r(t,n) =  0;
                %r(t,n) =  - abs((a(t,n) - d(t,n) + V(t,n)/p.dt)/d(t,n));
            %else
                %r(t,n) =  - abs((a(t,n) - d(t,n))^2/d(t,n));
            %    r(t,n) =  -1;
            %end
            %disp(a(t))
            %disp(d(t,n))
        end
        %% Update state
        if ~(t==p.tEnd)
            % Water balance
            [V(t+1,n),h(t+1,n),l(t,n)] = watBalRes(V(t,n),q(t,n),a(t,n),qTB(t,n),p,n);

            % route water downstream to agent n+1 (extra qTB column absorbs
            % the outflow of the last agent)
            if p.hydroGuy(n) %&& n < p.nAgents
                qTB(t+1,n+1) = qTB(t+1,n+1) + l(t,n) + a(t,n); % release passes downstream
            elseif ~p.hydroGuy(n) %&& n < p.nAgents
                qTB(t+1,n+1) = qTB(t+1,n+1) + l(t,n); % consumptive use: only losses pass
            end

            if strcmp(method,'disc')
                if p.logDimL
                    dimL(t+1,n) = log(abs((q(t+1,n) + qTB(t+1,n) - p.tBFlow * l(t+1,n) + V(t+1,n)/p.dt)) / d(t+1,n)); % new state.
                else
                    if ~p.circular && n == 1
                        dimL(t+1,n) = abs((q(t+1,n) + qTB(t+1,n) + p.tBFlow * l(t+1,n) + V(t+1,n)/p.dt)) / d(t+1,n); % new state.
                    elseif p.circular && n==1
                        % circular topology: first agent also receives the last agent's losses
                        dimL(t+1,n) = abs((q(t+1,n) + qTB(t+1,n) + p.tBFlow * (l(t+1,n)+l(t,end)) + V(t+1,n)/p.dt)) / d(t+1,n); % new state.
                    else
                        dimL(t+1,n) = abs((q(t+1,n) + qTB(t+1,n) + p.tBFlow * l(t+1,n) + V(t+1,n)/p.dt)) / d(t+1,n); % new state.
                    end
                end
                if or(isinf(dimL(t+1,n)),isnan(dimL(t+1,n)))
                    dimL(t+1,n) = p.maxDimL(n); % in case dimL turns all weird we use max attainable dimL.
                end
            end

            if strcmp(method,'GP')
                % p.stateV{n}(t+1,1) = mod(t+1,p.nDays); % already updated
                if p.simpleGPState
                    if ~V(t,n), V(t,n) = .01; end % just to avoid log problems
                    p.stateV{n}(t+1,:) = (abs((q(t,n) + qTB(t+1,n) - p.tBFlow * l(t,n) + V(t,n)/p.dt)) / d(t,n));% * (mod(t,p.nDays)+1));
                else
                    % NOTE(review): the isinf branch is fully commented out,
                    % so stateV(:,2) is simply not updated when V(t+1)==0
                    if isinf(log(V(t+1,n)))
                        %p.stateV{n}(t+1,2) = 10^-10; % just something really small, alt. could do dead stoarge.
                    else
                        %p.stateV{n}(t+1,2) = log(V(t+1,n));
                        p.stateV{n}(t+1,2) = V(t+1,n) / p.dt;
                    end
                    %p.stateV{n}(t+1,4) = qTB(t+1,n); % check!
                    p.stateV{n}(t,5) = l(t,n); % check!
                end
            end
            %% update Q
            if strcmp(method,'disc') && p.learn
                iStateNew(n) = sum(dimL(t+1,n) >= p.stateV{n}); % bin of the successor state
                aCat = sum(a(t,n) >= p.actionV{n}); % bin of the taken action
                Q{n}(iState(n),aCat) = (1-p.learnR(n)) * Q{n}(iState(n),aCat) + ...
                    p.learnR(n) * (r(t,n) + p.gamma * max(Q{n}(iStateNew(n),:))); % Q-Learning
                p.exploreP(n) = p.exploreP(n) / p.decreaseP(n); % decrease the probability of exploration.
                %p.learnR(n) = p.learnR0(n) / (1 + p.learnRBeta * t / (size(Q{n},1).*size(Q{n},2))); % learning rate annealing
                p.learnR(n) = p.learnR(n) / (1 + p.learnRBeta * t / (size(Q{n},1).*size(Q{n},2))); % learning rate annealing
                if p.exploreP(n) < p.minExpP(n)
                    p.exploreP(n) = p.minExpP(n); % just ensure a minimal amount of exploration.
                end
                iState = iStateNew;
            end

            if strcmp(method,'GP')
                % here, then, finally comes GPSARSA!
                if t==1
                    % init
                    sens = p.stateV{n}(t,:)';
                    gp{n} = startEpisode(p,gp{n},sens,a(t,n),n);
                else
                    % one SARSA transition: (s,a) -> r -> (s',a')
                    sens = p.stateV{n}(t,:)';
                    sensN = p.stateV{n}(t+1,:)';
                    act = a(t-1,n);
                    actN = a(t,n);
                    reward = r(t,n);
                    [p,gp{n}] = gpSARSA3(p,gp{n},sens,act,reward,sensN,actN,n,t);
                    if p.debugOn
                        keyboard
                    end
                end
            end
        end
    end
end
%% just some graphical output
if p.plotRes
    %     for n = 1 : p.nAgents
    %         h(:,n) = resVH(V(:,n),p.aCoeff(n),p.bCoeff(n),'vh');
    %         figure(n); subplot(2,1,1); title(['Agent_' num2str(n)]);
    %         plot(d(:,n),'r'); hold on; plot(a(:,n),'g'); plot(h(:,n),'k'); plot(q(:,n),'b');
    %         plot(qTB(:,n),'--b');hold off; xlabel('time (years)');
    %         legend('d_{i}(t)','a_{i}(t)','h_{i}(t)','q_{i}(t)','qTB_{i}(t)');
    %         drawnow, datetick('x',11)
    %         subplot(2,1,2); semilogy(dimL(:,n),'c'),xlabel('time (years)'),ylabel('\sigma_{i}(t)');
    %         drawnow, datetick('x',11)
    %     end

    % per-agent overview: demand, action, stage, recharge and transboundary
    % flow, plus the greedy-step markers recorded in p.maxEvents
    for n = 1 : p.nAgents
    h(:,n) = resVH(V(:,n),p.aCoeff(n),p.bCoeff(n),'vh');
    figure(1);
    subplot(n,1,n); title(['Agent_' num2str(n)]);
    plot(d(:,n),'r'); hold on; plot(a(:,n),'g'); plot(h(:,n),'k'); plot(q(:,n),'b');
    plot(qTB(:,n),'--b');

    tMax = find(p.maxEvents>0);
    plot(tMax,p.maxEvents(tMax),'r+');
    plot(cumsum(p.maxEvents),'c+');

    hold off; xlabel('time (months)');
    legend(['d_{',num2str(n),'}(t)'],['a_{',num2str(n),'}(t)'],['h_{',num2str(n),'}(t)'],['q_{',num2str(n),'}(t)'],['qTB_{',num2str(n),'}(t)'],'Location','SouthWest');
    drawnow,

    grid minor

    %datetick('x',11)
    %subplot(2,1,2);
    %plot(d(:,2),'r'); hold on; plot(a(:,2),'g'); plot(h(:,2),'k'); plot(q(:,2),'b');
    %plot(qTB(:,2),'--b');hold off; xlabel('time (months)');
    %legend('d_{2}(t)','a_{2}(t)','h_{2}(t)','q_{2}(t)','qTB_{2}(t)');
    %drawnow, %datetick('x',4)

    % state/action parallel-coordinates overview (needs Statistics Toolbox)
    figure(2)
    parallelcoords([p.stateV{1} a])

    % reward vs. action, split into greedy (MAX) and exploring (MIN VAR) steps
    figure(3)
    subplot(1,2,1)
    scatter(r(~~p.maxEvents),a(~~p.maxEvents)), xlabel('reward while MAX'),ylabel('action')
    subplot(1,2,2)
    scatter(r(~p.maxEvents),a(~p.maxEvents)),xlabel('reward while MIN VAR'),ylabel('action')
    end
end
%% return values
out.p = p;
if strcmp(method,'disc'),
    out.Q = Q;
end
%% return variables
out.a = a; out.r = r; out.l = l; out.V = V; out.dimL = dimL; out.h = h;  out.qTB = qTB; out.csR = csR;
out.learnR = p.learnR;
if strcmp(method,'GP')
    out.gp = gp;
end