%% Script for wire-fitted NN Q-learning
% Tobias Siegfried 13.04.2007
%% directory
% Change into the project working directory, with a hard-coded path per
% platform (Windows vs. macOS). On any other platform (e.g. Linux) this
% section intentionally does nothing and the current directory is kept.
if ispc
    cd('D:\matlab\agents\intContLearn\')
elseif ismac
    cd('/Users/tobias/Documents/Science/matlab/agents/intContLearn/');
end
%% Parameters
% All run configuration is collected in a single struct `param`.
param.randS = 3;                % random seed
param.years = 50;               % simulated horizon in years
param.muS = 100;                % supply mean (presumably runoff; see generateRunoffTS)
param.rhoS = 0.9992;            % supply autocorrelation coefficient — assumed; verify in generateRunoffTS
param.sigmaS = 50;              % supply standard deviation
param.tEnd = param.years*365;   % horizon in days (daily decision steps)
param.muD = 100;                % demand mean
param.sigmaD = 75;              % demand standard deviation
param.nTS = 1;                  % number of time series
% reservoir parameters
param.aCoeff = 0.001;           % volume-height relation coefficient (see resVH)
param.bCoeff = 0.5;             % volume-height relation exponent (see resVH)
param.V0 = 1e10;                % initial volume in m^3
param.Vmax = 1e10;              % reservoir capacity in m^3
% discounting
param.delta = 0.03;             % discount rate
param.dt = 24*3600;             % decision period: 1 day, in seconds
% demand growth
param.dGrowth = 0.002;          % demand growth rate
% learning
param.learnR = 0.5;             % learning rate
param.decreaseP = 1.0005;       % exploration decay factor — assumed; verify in learner
param.explorePIni = 0.9;        % initial exploration probability
param.discS = 1;                % state discretization setting
param.discQ = 2;                % action/Q discretization setting
param.maxDimL = 500;            % that is the state variable maximum
%% Setting up environment
% Supply: generate the runoff time series from the parameter struct.
q = generateRunoffTS(param);
% Demand: seasonal variability assumed, no growth.
d = demandMM(param.muD,param.sigmaD,param.tEnd,param.dGrowth)';
% quick visual check: supply (blue) against demand (red)
plot(q,'b');
hold on;
plot(d,'r');
hold off
% The state consists of available runoff q and reservoir level S at a time.
% To characterize both in a compact way, we use dimL=q/h(S) as a
% dimensionless number to characterize the state. h(S) is the reservoir
% level.
% the action is continuous and defined as how much water to consumptively utilize at time
% t.
% the agent tries to maximize the sum(1/(1+delta)^t*-abs(c-d)) which is defined to be the
% total discounted future (rate delta) benefit. the utility he derives from
% the consumption is denoted by u
% the physical system is such that c is only available from the reservoir.
% reservoir levels are determined by the following balance equation.
% h(t+1) = h(t) + ((q-c)*dt/a)^(1/b);
% any excess water in the reservoir is lost irretrievably.
%% NO LEARNING
% Rule-based baseline: at every step the agent withdraws as much water as
% the reservoir holds, up to the full demand volume for that period.
V = zeros(param.tEnd,1); a = V; l = V; r = V;
V(1) = param.V0;
for t = 1:param.tEnd - 1
    demandVol = d(t)*param.dt;       % demanded volume this period [m^3]
    % withdraw the demanded volume, or whatever is left in storage
    a(t) = min(V(t), demandVol);
    % mass balance: inflow minus withdrawal
    V(t+1) = V(t) + q(t)*param.dt - a(t);
    if V(t+1) < 0
        V(t+1) = 0;
    elseif V(t+1) > param.Vmax
        l(t) = V(t+1) - param.Vmax;  % l is the spilled (lost) volume
        V(t+1) = param.Vmax;
    end
    r(t) = a(t)/param.dt - d(t);     % shortfall rate (<= 0 when demand unmet)
end
% convert accumulated volumes back to rates
a = a/param.dt;
l = l/param.dt;
% reservoir level corresponding to each volume
h = resVH(V,param.aCoeff,param.bCoeff,'vh');
% dimensionless numbers tried as compact state representations
dimL = q./h;
dimL1 = abs(q + V/param.dt)./d;
%% plot
% Visualize the rule-based (no learning) run.
figure;
subplot(1,2,1);
plot(q,'b'), hold on; plot(d,'r'); plot(h,'k'); plot(a,'g');hold off;
% Bug fix: title was previously set BEFORE subplot(1,2,1); the subplot
% deleted the default axes carrying it, so the title never appeared.
title('Rule based, no learning');
xlabel('time');
subplot(1,2,2)
semilogy(dimL1,'c'); xlabel('time');
ylabel('state (dimL)');
%% Traditional Q-Learning with a discrete action-state space; uses the
% dimensionless state for problem reduction.


%% NN Specification and initialization
% Feed-forward net: one input ranging over [0, Vmax], a hidden layer of 3
% tansig neurons, a single purelin output, trained with gradient descent.
% Bug fix: `Vmax` was undefined — the script stores it as param.Vmax.
net = newff([0 param.Vmax],[3,1],{'tansig','purelin'},'traingd');
netA = init(net); % re-initialize weights and biases
p = [1 2 3 4 5 6 7 8 9];
% Bug fix: previously sim(net,p), which left the initialized netA unused;
% simulate the re-initialized network instead (output left unsuppressed
% on purpose, for inspection).
sim(netA,p)

%% LEARNING
SVR = OnlineSVR;
% Set Parameters
SVR = set(SVR,      'C',                    10, ...
    'Epsilon',              0.1, ...
    'KernelType',           'RBF', ...
    'KernelParam',          30, ...
    'AutoErrorTollerance',  true, ...
    'Verbosity',            1, ...
    'StabilizedLearning',   true, ...
    'ShowPlots',            true, ...
    'MakeVideo',            false, ...
    'VideoTitle',           '');
%% routine
% Incrementally train the online SVR: input is the dimensionless state
% dimL1(t), target is the absolute supply-demand mismatch |q(t)-d(t)|.
% Bug fix: the loop bound used `tEnd`, which is undefined — the script
% stores the horizon in param.tEnd.
for t = 1 : param.tEnd - 3000
    t % progress display, unsuppressed on purpose
    SVR = Train(SVR, dimL1(t),abs(q(t)-d(t)));
    %ShowInfo (SVR);
    %keyboard
end


