%% Script for setting up peters for a simple physical model
%
% The state consists of available runoff q, reservoir level V and demand d
% at time t. We use a compact state representation by means of a dimensionless
% number dimL(t+1)=(q(t+1)+V(t+1)/dt)/d(t+1) for dimensionality reduction.
% h(t) is the reservoir level. The action a(t) is continuous and defined as
% how much water to consumptively utilize at time t.
% The agent tries to maximize sum(1/(1+delta)^t * -abs(c-d)), which is defined to be the
% total discounted future (rate delta) benefit. The utility derived from
% consumption is denoted by u.
% The physical system is such that c is only available from the reservoir.
% Reservoir levels are determined by the following balance equation:
% h(t+1) = h(t) + ((q-c)*dt/a)^(1/b); 
% any excess water in the reservoir is lost irretrievably.
%
% Tobias Siegfried, 05.15.2007
% - resuming work, 09/29/2008
% - GPSARSA implementation started - 10/04/2008
%% Directory
% Working-directory setup for two machines (Windows vs. Mac paths).
% to change eventually.
if ispc
    cd('D:\matlab\diegoGame\intContLearn')
elseif ismac
    cd('/Users/tobias/Documents/Science/articles/agentsWater/code');
end
%% Directory CU
% NOTE(review): when the script is run top-to-bottom this cell overrides
% the cd above (same Windows path, different Mac home directory) -
% presumably the two cells are meant to be run individually per machine;
% confirm before deleting either one.
if ispc
    cd('D:\matlab\diegoGame\intContLearn')
elseif ismac
    cd('/Users/tobiassiegfried/Documents/Science/articles/agentsWater/trunk/code');
end
%% Parameters
% Legacy (pre-R2011) RNG seeding syntax. NOTE(review): the seeds are
% hard-coded to 1 here while p.randS below is labelled "random seed"
% (and set to 2) - confirm which one is authoritative.
rand('twister',1)
randn('state',1)
% General
% NOTE(review): 'clear all' runs AFTER the seeding calls above; it wipes
% workspace variables (none exist yet at this point) and presumably leaves
% the generator state intact - confirm.
clear all
p.learn = 1; % enable learning updates
p.keepRS = 1;
p.reuseOldStateV = 1;
p.randS = 2; % random seed
p.yearEcho = 10; % plot each xth year on the screen so as to see simulation progress.
p.nAgents = 1; % number of agents that are acting in a serial setup in a river.

% hydropower guys - in the case of hydropower, the timing is crucial.
% consumptive use is 0.
p.hydroGuy = [1 1];

% Time
p.years = 30; % total simulation time
p.nDays = 12; % number of days per simulation
p.nDaysPerMonth = 30;
p.tEnd = p.years * p.nDays; % total number of timesteps
p.dt = 3600 * 24 * 30; % seconds / decision period.
 
% Runoff generation (SUPPLY)
p.muS = [50 10]; % runoff generator peter (mean runoff) [m^3/s]
p.rhoS = .995; %.995; % lag 1 correlation for Markov process
p.sigmaS = 10; % runoff generator peter (standard deviation to be used for stochastic process)
p.nTS = p.nAgents; % number of runoff timeseries to be generated

% Demand generation (DEMAND)
p.muD = 50; % demand generator, assuming muD demand (mean)
p.sigmaD = 30; % demand generator for seasonal variability
p.dGrowth = .00; % slope of demand growth (linear growth assumed)

% Reservoir (1x2 vectors: one entry per agent)
p.Vmax = [3000000000 3000000000]; % Maximum reservoir capacity
p.V0 = [100000000 10]; % initial filling stage
%p.Vmax = [0 0]; % Maximum reservoir capacity
%p.V0 = [0 0]; % initial filling stage
p.aCoeff = [.001 .001]; % parameter to established reservoir level (h) - volume (V) relationship
p.bCoeff = [.5 .5]; % ditto
p.resReleaseConstr = [100 100]; % reservoir release capacity constraint (m^3/s)
p.minV = .01; % dead storage

% Discount factor
p.delta = [.01 .01];

% Learning parameters (1x2 vectors are per agent)
p.learnR = [.5 .5]; % learning rate
p.learnRBeta = .01;
p.decreaseP = [1.007 1.003]; % exploration-decay factor
p.explorePIni = [.5 .5] ; % initial exploration probability
p.exploreP = p.explorePIni;
p.minExpP = [10^-3 10^-3]; % exploration-probability floor
p.discQ = [100 100]; % action discretization
p.discS = [100 100]; % state discretization
p.smoothR = .6;
p.gammaP = 100;
p.testExplore = zeros(p.tEnd,1);
% Setting up environment
% Supply
% generateRunoffTS / demandMM are project-local generators; the commented
% lines below are constant-supply debugging toggles.
p.q = generateRunoffTS(p,1);
%p.q(:,1) = 50 * ones(length(p.q),1);
%p.q(:,2) = 20 * ones(length(p.q),1);
%p.q = 5*abs(ikeda(p.tEnd,1,1,0.0017,0));

% Demand
p.d(:,1) = demandMM(p,p.muD,p.sigmaD,p.tEnd,p.dGrowth)';
%p.d(:,1) = 50 * ones(length(p.q),1);
p.scaleF = 200;
% Seasonal demand for agent 2: scaled increments of a sigmoid (tansig -
% Neural Network Toolbox), tiled yearly, offset by 1.
p.d(:,2) = p.scaleF * repmat(diff(tansig(-5:.1/p.nDays*100:5)),1,p.years)' + 1;
p.harvestTime = 9;
p.dAll = p.d;
% This is a dummy for the maximum attainable crop yield at the time of harvest.
p.maxCropP = 1;
p.yieldResponseFactor = 0.8;

% State
% Bounds for the dimensionless state dimL = (q + V/dt)/d (see file header).
p.logDimL = 0; % 1 = work with log(dimL) instead
if p.logDimL
    p.maxDimL = log(max(max(p.q) + p.Vmax/p.dt)./min(p.d)); % log bsc Doom idea;
    p.minDimL = repmat(-30,1,2);
else
    % cumulative upstream supply bounds the attainable dimL of each agent
    csQ = cumsum(p.q,2);
    for n = 1 : p.nAgents
        p.maxDimL(n) = max(csQ(:,n)+p.Vmax(n)/p.dt)/min(p.dAll(:,n));
        p.minDimL(n) = 0;
    end
end

% Rest
p.minRandGP = 10;
p.countI = 0;
p.QOffset = -99;
p.tBFlow = 1;
% This control switch checks whether or not to restrict max feasible allocation

p.plotRes = 1;
p.circular = 0; % This hypothetical example should lead to non-stationarity for all players.
% Clean up
%clear csQ n
% Run la chose!
%figure(100)
% NOTE: this gamma is overwritten inside the 'GP' branch below with the
% per-agent discount 1/(1+delta).
p.gamma = 0.01;

% Learning-method switch: tabular discretization vs. Gaussian-process SARSA.
%p.method = 'disc';
p.method = 'GP';

if strcmp(p.method,'GP')
    % p.nFeatures = [1000 1000];
    % New GP stuff
    p.polyKDegree = 2;
    p.precision = .9;
    p.simpleGPState = 0;
    if p.simpleGPState, p.stateVDim = 1; else p.stateVDim = 6; end
    p.actionVDim = 1;
    % stateVec(1): time, stateVec(2): res. level, stateVec(3): recharge, stateVec(4): transborder flow, stateVec(5): reservoir losses, stateVec(6): water demand
    p.currDSize = 0;
    p.sparseP = 0; % nu -The sparsity threshold for rejecting new entries into the dictionary 
    % when using nonparametric updates. A value of 0 is equivalent to using a
    % non-sparsifying representation (and is very computationally expensive).
    p.scalePriorV = 5;
    p.kSigmaS = .5 * p.scalePriorV; % state-kernel width
    p.kSigmaA = 9 * p.scalePriorV; % action-kernel width
    p.epsilon0 = 1;
    p.epsilon = p.epsilon0;
    p.sigma = 1; % The prior reward variance. Represents the intrinsic "noise" of the reward process.
    p.gamma = (1./(1+p.delta)); % per-agent discount factor
    p.actDisc = 100; % used to compute Q, we use a discretization for the action-space
    p.cPriorV = 0.0000001;
    p.kernels2use = 'pg';
    % BUGFIX: added missing semicolon - this previously echoed a tEnd-by-1
    % zeros vector to the command window on every run.
    p.maxEvents = zeros(p.tEnd,1);
end
p.debugOn = 0;
p.reisingerImp = 1;
% Run models
% learnGP1 is the project's main learning loop; it returns the updated
% parameter struct and, for the GP method, the per-agent GP structures.
tic;
% res = resLearnAgBoltzmann(p,p.d,p.q,p.method,'undirE'); res = resLearnAgBoltzmann(p,p.d,p.q,p.method,'BE');
res = learnGP1(p,p.d,p.q,p.method,'BE');
p = res.p;
if strcmp(p.method,'GP'), gp = res.gp; end
toc; 
%% compute a slice of the Q-function based on the states and actions
% For scalar states 1..10, evaluates Q and its variance over a discretized
% action range and plots the mean with +/- 2*sqrt(V) error bands.
figure(2)
n = 1;
disc = 500;
% NOTE(review): this s (last visited state vector) is immediately
% overwritten by the scalar s = s1 inside the loop - dead code?
s = p.stateV{n}(end,:)';
a = max(p.actionV{n});
a = (0:(a/disc):a)';
% NOTE(review): a has disc+1 points, so Q and V grow by one element on
% their first use below.
Q = zeros(disc,1);
V = Q;
d = gp{n}.dict;
maxQ = 0;
for  s1 = 1:1:10
    s1 % progress echo (intentionally no semicolon)
    s=s1;
    for i = 1 : length(a)
        [Q(i),V(i)] = computeQ(p,gp{n},s,a(i));
    end
    tempMaxQ = max(Q);
    if tempMaxQ > maxQ, maxQ=tempMaxQ; end
    f = [Q+2*sqrt(V);flipdim(Q-2*sqrt(V),1)]; % standard noise-free pointwise errorbars
    fill([a; flipdim(a,1)], f, [7 7 7]/8, 'EdgeColor', [7 7 7]/8);
    hold on
    plot(a,Q,'k-','LineWidth',1); plot(a,Q-2*sqrt(V),'r'); plot(a,Q+2*sqrt(V),'r');
    hold off
    axis([0 max(p.actionV{n}) -150 50]), drawnow
end
maxQ % display the largest mean Q encountered
%% compute a sheet of the Q-function
% Evaluates mean Q and its variance over a (state x action) grid for agent
% n and shows both as images.
n = 1;
disc = 50;
% action grid: disc+1 points from 0 to the largest visited action
a = (0:(max(p.actionV{n})/disc):max(p.actionV{n}))';
% BUGFIX: preallocate (disc+1)-by-length(a). Results are stored below as
% Q(state,action) and the state loop runs disc+1 times, so the previous
% zeros(length(a),disc) shape was transposed and one column short, forcing
% an implicit grow on the last iteration. Final contents are unchanged.
Q = zeros(disc+1,length(a));
V = Q;
s = 0; % state-grid row counter
for j = 0 : max(p.stateV{n}(:))/disc : max(p.stateV{n}(:))
    s = s + 1;
    for i = 1 : length(a)
        [Q(s,i),V(s,i)] = computeQ(p,gp{n},j,a(i));
        % [Q(i),Vari(i)] = computeQ(p,gp{1},sta,action(i));
    end
end
figure(3)
subplot(1,2,1)
imagesc((Q)), colorbar, title('mean Q')
subplot(1,2,2)
%imagesc(2*sqrt((V))), colorbar, title('var Q')
imagesc(V), colorbar, title('var Q')
%%
% Scatter of the visited (action, state) pairs for agent n.
figure(4)
scatter(p.actionV{n},p.stateV{n})
%% GA
% Genetic-algorithm objective evaluation with result plotting suppressed.
p.plotRes = 0; xVal = objFunGa(p);
%% use previous Q-values
% Re-generates a short (5-year) supply/demand scenario and continues
% learning from the previously obtained result structure 'res'.
p.years = 5;
p.tEnd = p.years * p.nDays;
p.q = generateRunoffTS(p,1);
% BUGFIX: demandMM takes the parameter struct as its first argument (cf.
% the call in the Demand section above); it was missing here.
d1 = demandMM(p,p.muD,p.sigmaD,p.tEnd,p.dGrowth)';
% NOTE(review): the step .1/3.65 differs from the .1/p.nDays*100 used when
% p.d was first built - confirm which sigmoid sampling is intended.
d2 = p.scaleF * repmat(diff(tansig(-5:.1/3.65:5)),1,p.years)'+1;
p.d = [d1 d2];

tt = resLearnAg(p,p.d,p.q,'disc','undirE',res);
%% iterate over a couple of time sections
% Repeatedly re-draws runoff and warm-starts learning from the previous
% result structure.
% BUGFIX: the loop counter used to shadow its own upper bound
% (for m=1:m) - legal in MATLAB but error-prone; use a separate counter.
m = 10; % number of re-learning passes
ttOld = tt;
for k = 1:m
    p.q = generateRunoffTS(p,0);
    tt = resLearnAg(p,p.d,p.q,'disc','undirE',ttOld);
    ttOld = tt;
end
%% determine an ensemble of critical crop stage factors
% Runs nMax independent learning realizations and stacks the resulting
% critical-stage matrices tt.csR along the third dimension of res.
% NOTE(review): 'res' holds a struct if the main run above was executed in
% this session - clear it first or the 3-D assignment below will error.
nMax = 200;
for n = 1 : nMax
    tt = resLearnAg(p,p.d,p.q,'disc','undirE');
    res(:,:,n) = tt.csR;
end
%% plotting for the above results (YRC)
% Ensemble mean of the yearly relative crop yield with a +/- one standard
% deviation band (res: years x quantities x ensemble members).
figure(101);
yieldEns = squeeze(res(:,2,:)); % YRC slice: years x members
muY = mean(yieldEns,2);
sdY = std(yieldEns,[],2);
band = [muY - sdY, muY + sdY]; % lower/upper envelope
plot(muY),hold on;
UT(1:p.years,band(:,1),band(:,2),0.9*[1 1 1]); % Han's super duper function
plot(muY,'k'),hold on;
hold off; xlabel('time (years)'),ylabel('\mu(YRC) and \sigma(YRC)')
%% Detrend fluctuation analysis
% Integrated anomaly profile of the first runoff series: y(i) is the
% running sum of deviations from the series mean.
q = p.q(:,1);
dq = q - mean(q);
% PERF: cumulative sum replaces the former O(n^2) loop
% (y(i) = sum(dq(1:i))); the transpose keeps y a row vector as before.
y = cumsum(dq)';
figure
subplot(2,1,1); hold on, plot(q), plot(repmat(mean(q),1,length(q)),'r'),hold off
subplot(2,1,2); plot(y)
%% phase plot
% Lag-1 phase portrait: q(t) against q(t-1).
% NOTE(review): the rand draw below is dead code - it is immediately
% overwritten with the observed runoff; presumably a white-noise toggle.
q = rand(10000,1);
q = p.q(:,1);
figure,plot(q,q([1 1:end-1]),'x')
%% DFA
% Detrended fluctuation analysis of the first runoff series; the slope of
% log(flucts) vs. log(intervals) (aq(1)) estimates the scaling exponent.
[alpha,intervals,flucts]=fastdfa(p.q(:,1));
figure,loglog(intervals,flucts,'or-');
% lin. regression
aq=polyfit(log(intervals),log(flucts),1);
%% white noise DFA test
% Sanity check of the DFA estimator on uncorrelated uniform noise.
wn=rand(10000,1);
[alpha,intervals,flucts]=fastdfa(wn);
figure,loglog(intervals,flucts,'or-');
aWN = polyfit(log(intervals),log(flucts),1) % echoed deliberately
alpha % echoed deliberately
%% online DFA-test
% Expanding-window DFA over the runoff record: alpha tracks how the
% scaling-exponent estimate evolves with record length.
% NOTE(review): tStart = 10000 exceeds p.tEnd (= 360 with the defaults
% above), so this loop never executes unless a longer series is loaded.
tic
tStart = 10000;
for t = tStart : length(p.q(:,1))
    t % progress echo
    [alpha(t-tStart+1),intervals,flucts]=fastdfa(p.q(1:t,1));
end
toc
figure(101),plot(alpha)
%% test ts change
% Builds a composite series (uniform then Gaussian noise) to probe the DFA
% estimator's response to a distributional change; as wired here the
% composite is then replaced by the observed runoff (toggle).
q = rand(1000,1);
% BUGFIX: added missing semicolons - these two lines previously dumped
% thousands of numbers to the command window.
q1 = randn(1000,1);
q = [q;q1];
q = p.q;
[alpha,intervals,flucts]=fastdfa(q);
figure(101),plot(q)
figure(100),loglog(intervals,flucts,'or-');
%%
% Expanding-window DFA over q from the very first sample; alpha(t) is the
% exponent estimated from q(1:t).
clear alpha
tStart = 1;
% NOTE(review): with windowS > 1 the index alpha(t-tStart+1) would leave
% zero-filled gaps; fine at windowS = 1 as set here.
windowS =  1;
tic
for t = tStart :windowS : length(q)
    t % progress echo
    [alpha(t-tStart+1),intervals,flucts]=fastdfa(q(1:t,1));
end
toc
figure(102),plot(alpha(500:end))
%% test
% Probe of fastdfa's optional second argument - presumably a vector of
% window sizes/intervals; verify against the fastdfa implementation.
vec=(1:10)';
[alpha,intervals,flucts]=fastdfa(q,vec);
%% Han baby
% Rolling Mann-Kendall trend test over q (window length 200, significance
% level 0.005); H(t) collects the detection flag per window start.
% NOTE(review): the hard-coded 1900 start positions require
% length(q) >= 2099 - confirm against the series in use.
clear taub h sig Z S sigma sen H
for t=1:1900
    t % progress echo
    datain = [[1:200] ;q(t:t+200-1)'];
    [taub h sig Z S sigma sen] = MKTest(datain', 0.005);
    H(t)=h;
end
%% Investigate state-action space discretization issues
% Benchmarks state-grid resolution: for each grid size, runs the learner
% and records the cumulative supply-demand deficit and the wall-clock time.
maxR = 5;
res = cell(maxR,1);
gridRes = [10 50 100 500 1000]; % state-grid sizes to test
timeMeasure = zeros(size(gridRes));
deficitRes = zeros(size(p.q,1),maxR);
p.q = p.d; % supply tracks demand exactly for this experiment
for sd = 1:maxR
    tic;
    p.discQ = 200; % action discretization (fixed)
    p.discS = gridRes(sd); % state discretization (varied)
    tt = resLearnAg(p,p.d,p.q,'disc','undirE');
    res{sd} = tt;
    deficitRes(:,sd) = cumsum(abs(tt.p.d(:,1) - tt.a));
    timeMeasure(sd) = toc;
end
figure, plot(deficitRes); legend('n_{\Delta s}=10','n_{\Delta s}=50','n_{\Delta s}=100','n_{\Delta s}=500','n_{\Delta s}=1000');
%% similar for action-space discretization
% Same benchmark, now varying the action-grid resolution at a fixed state
% grid.
maxR = 5;
res = cell(maxR,1);
gridRes = [10 50 100 500 1000]; % action-grid sizes to test
timeMeasure = zeros(size(gridRes));
deficitRes = zeros(size(p.q,1),maxR);
p.q = p.d; % supply tracks demand exactly for this experiment
for sd = 1:maxR
    tic;
    p.discS = 500; % state discretization (fixed; was mislabelled "action")
    p.discQ = gridRes(sd); % action discretization (varied)
    tt = resLearnAg(p,p.d,p.q,'disc','undirE');
    res{sd} = tt;
    deficitRes(:,sd) = cumsum(abs(tt.p.d(:,1) - tt.a));
    timeMeasure(sd) = toc;
end
figure, plot(deficitRes); legend('n_{\Delta a}=10','n_{\Delta a}=50','n_{\Delta a}=100','n_{\Delta a}=500','n_{\Delta a}=1000');
xlabel('time'),ylabel('performance');
%% state and action spaces together at the same time.
% Varies state and action grids jointly at equal resolution.
maxR = 5;
res = cell(maxR,1);
gridRes = [100 200 300 400 500]; % joint grid sizes to test
timeMeasure = zeros(size(gridRes));
deficitRes = zeros(size(p.q,1),maxR);
p.q = p.d; % supply tracks demand exactly for this experiment
for sd = 1:maxR
    tic;
    p.discS = gridRes(sd); % state discretization (was mislabelled "action")
    p.discQ = gridRes(sd); % action discretization
    tt = resLearnAg(p,p.d,p.q,'disc','undirE');
    res{sd} = tt;
    deficitRes(:,sd) = cumsum(abs(tt.p.d(:,1) - tt.a));
    timeMeasure(sd) = toc;
end
figure, plot(deficitRes); legend('n_{\Delta s,a}=100','n_{\Delta s,a}=200','n_{\Delta s,a}=300','n_{\Delta s,a}=400','n_{\Delta s,a}=500');
xlabel('time'),ylabel('performance');
%% NO LEARNING
% while assuming that the agent always consumes as much as he can so as to
% % cover the demand.
% V = zeros(p.tEnd+1,1);
% a = zeros(p.tEnd,1); % action
% l = a;
% r = a;
% dimL = a;
% V(1) = p.V0;
% for t = 1: p.tEnd
%     if t == 1
%         dimL(t) = abs((q(t) + V(t)/p.dt)) / d(t); % just to get the state at time t=1;
%     end
%     % discuss the availability cases
%     if V(t)/p.dt > d(t)
%         a(t) = d(t);
%     else
%         a(t) = V(t)/p.dt;
%     end
%     V(t+1) = V(t) + q(t)*p.dt - a(t) * p.dt;
%     if V(t+1) < 0,
%         V(t+1) = 0;
%     elseif V(t+1) > p.Vmax
%         l(t) = (V(t+1) - p.Vmax) / p.dt; % l is the loss
%         V(t+1) = p.Vmax;
%     end
%     r(t) = a(t) - d(t);
%     if ~(t==p.tEnd)
%         dimL(t+1) = abs((q(t+1) + V(t+1)/p.dt)) / d(t+1); % new state.
%     end
% end
% h = resVH(V,p.aCoeff,p.bCoeff,'vh');
% V(end) = [];
% % Plot - no learning results
% figure; title('Rule based, no learning')
% subplot(1,2,1);
% plot(q,'b'), hold on; plot(d,'r'); plot(h,'k'); plot(a,'g');hold off;
% xlabel('time');
% subplot(1,2,2)
% semilogy(dimL,'c'); xlabel('time');
% ylabel('state (dimL)');

