function MDP
% MDP  Driver: solve the decision (PO)MDP for one parameter setting and
% plot the resulting policy and value function over the belief grid.

clear;
nx = 11;                        % number of discretized belief states
b  = linspace(0,1,nx);          % belief grid on [0,1]

% Number of parameter settings to solve for (one column of v/policy each).
% BUG FIX: v and policy were previously allocated with 5 columns while the
% loop ran only once, so columns 2:5 stayed zero and were plotted as
% spurious flat lines. Allocation now matches the loop count.
nruns = 1;
v = zeros(nx,nruns);
policy = zeros(nx,nruns);

for i = 1:nruns
    [v(:,i),policy(:,i)] = Solve_MDP( 0.5, 20, -400, -1);
end

figure;
subplot(2,1,1);
plot(b,ceil(policy/2))          % collapse paired action indices to a binary decision

subplot(2,1,2);
plot(b,v)

%figure;
%plot(0.01:0.01:1, ceil(policy((nx+1)/2,:)/2));


function [v, policy] = Solve_MDP(c,r_p,r_n,r_s)
% SOLVE_MDP  Solve the MDP built by decisionPOMDP via value iteration or
% policy iteration, in either a discounted or an average-reward setting
% (selected by the flags below; defaults: average reward, value iteration).
%
% Inputs (defaults used when called with no arguments):
%   c   - first argument forwarded to decisionPOMDP.
%         NOTE(review): this variable is later reused to hold the average
%         reward estimate inside the solver; the two uses are unrelated.
%   r_p - forwarded to decisionPOMDP; presumably a positive reward -- TODO
%         confirm against decisionPOMDP (not visible in this file)
%   r_n - forwarded to decisionPOMDP; presumably a negative reward/penalty
%   r_s - forwarded to decisionPOMDP; presumably a per-step cost
%
% Outputs:
%   v      - nx-by-1 value function (differential values in the average case)
%   policy - nx-by-1 greedy policy: action index in 1..nu maximizing H
%
% With nargin == 0, demo parameters are used and diagnostic plots are shown.
%--------- user-defined parameters --------------------------------------
isplot = 0;
if nargin == 0
    c = 0.8;
    r_p = 20;
    r_n = -100;
    r_s = -1;
    isplot = 1;                         % enable diagnostic plots in demo mode
end
alpha = 0.9;                            % discount factor
tol = 1E-8;                            % convergence threshold

% flags specifying problem formulation (only one can be set)
discounted = 0;
average = 1;

% flags specifying algorithm (only one can be set)
valueiteration = 1;
policyiteration = 0;

% set alpha=1 if not discounted
if ~discounted,
    alpha = 1;
end

%-------- make MDP, allocate results ------------------------------------
% decisionPOMDP (defined elsewhere) is expected to return the transition
% tensor P (nx-by-nx-by-nu, one matrix per action) and reward matrix L
% (nx-by-nu), given the indexing used below -- TODO confirm.
[P, L] = decisionPOMDP(c,r_p, r_n,r_s);
[nx, nu] = size(L);

% allocate results
v = zeros(nx,1);                        % value function
policy = ones(nx,1);                    % policy
H = zeros(nx,nu);                       % Hamiltonian
PP = zeros(nx,nx);                      % policy-specific transitions
LL = zeros(nx,1);                       % policy-specific costs

iter = 0;

%--------- value iteration ----------------------------------------------
if valueiteration,
    
    while 1,
        vold = v;
        
        % compute Hamiltonian for current v: H(x,u) = L(x,u) + alpha*E[v(x')]
        for iu = 1:nu
            H(:,iu) = L(:,iu) + alpha*P(:,:,iu)*v;
            %H(:,iu) = L(:,iu) + alpha*P*v;
        end
        
        % update v, compute policy (rewards are maximized, hence max not min)
        [v, policy] = max(H,[],2);
        if average,
            % relative value iteration: subtract the mean so v stays bounded;
            % c becomes the running estimate of the average reward per step
            % (overwriting the input parameter c, which is no longer needed)
            c = mean(v);
            v = v - c;
            H = H - c;
        end
        
        iter = iter + 1;
        
        % check for convergence
        if max(abs(v-vold))<tol,
            break;
        end
    end
    
    
    %--------- policy iteration ---------------------------------------------
elseif policyiteration,
    
    while 1,
        vold = v;
        
        % construct transitions and cost for current policy
        for ix = 1:nx
            PP(ix,:) = P(ix,:,policy(ix));
            LL(ix) = L(ix,policy(ix));
        end
        
        % evaluate current policy
        if discounted,
            v = (eye(nx)-alpha*PP)\LL;  % solve (I - alpha*PP) v = LL exactly
            
        elseif average,
            % average-reward evaluation: solve the bordered linear system for
            % the differential values v and average reward c jointly, with
            % the normalization sum(v) = 0 imposed by the last row/column
            tmp = [eye(nx)-PP, ones(nx,1); ones(1,nx) 0]\[LL; 0];
            v = tmp(1:nx);
            c = tmp(end);
        end
        
        % compute Hamiltonian using policy-specific v
        for iu = 1:nu
            H(:,iu) = L(:,iu) + alpha*P(:,:,iu)*v;
        end
        
        % update policy and value (greedy policy improvement step)
        [v, policy] = max(H,[],2);
        
        iter = iter + 1;
        
        % check for convergence
        if max(abs(v-vold))<tol,
            break;
        end
    end
end

if isplot
    iter                                % no semicolon: print iteration count
    
    figure;
    plot(policy);
    xlim([1,nx]);
    ylim([1,nu]);
    
    figure;
    plot(v);
    hold on;
    plot(H(:,3),'r');                   % assumes nu >= 3 -- TODO confirm
    % plot(H(:,1),'g');
    % plot(H(:,2),'g');
    hold off;
    xlim([1,nx]);
end
