%% Given known c compute optimal value function and policy
function decisionValue()
% Driver: for several coherence levels c, compute the optimal value
% function over belief states and the optimal policy threshold at the
% uninformative initial belief b = 0.5, then plot both.
nb  = 41;    % belief-state grid size (odd, so b = 0.5 is a grid point)
r_p = 20;    % reward for the correct action
r_n = -100;  % reward for the incorrect action
r_s = -1;    % reward (cost) per observation sample
nc  = 5;     % number of coherence levels
c = [0.51 0.55 0.6 0.7 0.8];
optV = zeros(nb,nc);  % optimal value function, one column per coherence
optD = zeros(1,nc);   % optimal policy threshold per coherence
for i = 1 : nc
    [v, d] = decisionValue0(nb, r_p, r_n, r_s, c(i));
    % For each belief state, take the best value over all policies;
    % tmpD holds the index of the optimal policy at each belief state.
    [optV(:,i), tmpD] = max(v,[],2);
    % Read off the policy that is optimal at the initial belief b = 0.5.
    % For b = linspace(0,1,nb) with odd nb, b = 0.5 is at grid index
    % (nb+1)/2; the original code used (nb-1)/2 (b = 0.475), an
    % off-by-one.
    optD(i) = d(tmpD((nb+1)/2));
end
% Plot the value functions and the threshold as a function of coherence
figure;
subplot(1,2,1);
plot(linspace(0,1,nb),optV);
xlabel('Belief State');
ylabel('V^{*}(b)');
subplot(1,2,2);
plot(2*c - 1, optD,'-o');
xlim([2*c(1)-1, 2*c(nc)-1]);
xlabel('Coherence c');
ylabel('Policy Threshold \theta');

function [V, d] = decisionValue0(nb, r_p, r_n, r_s, c)
%************************************************************************
%POMDP VALUE FUNCTION FOR SEQUENTIAL DECISION PROBLEM
%To Infer The Hidden Binary State s = {+1, -1},
%From The Binary Observation State o = {+1, -1}
%Emission model P( o | s ) = (1 + c*o*s) /2
%Inputs:
%        nb, Belief State Space Grid Size
%         c, motion strength, or emission probability
%       r_p, reward for making the correct action
%       r_n, reward for making the incorrect action
%       r_s, reward for sampling
%Outputs:
%         V, value function for different policies d, nb * (nb-1)/2 matrix
%         d, 0.5 <= d <= 1, takes action A_R if b > d, A_L if b < 1 - d
%************************************************************************

% Fill in defaults only for the arguments that were not supplied.
% (The original overwrote all five whenever fewer than five were given,
% silently discarding any argument the caller did pass.)
if nargin < 1, nb  = 41;   end
if nargin < 2, r_p = 20;   end
if nargin < 3, r_n = -100; end
if nargin < 4, r_s = -1;   end
if nargin < 5, c   = 0.3;  end


T = zeros(nb,nb);          % belief-state transition matrix under sampling
b =  linspace(0,1,nb);     % discretized belief grid, b = P(s = S_R)
OR = c;     %Likelihood function = P(o=O_R | s=S_R)
OL = 1 - OR;

%First Compute the transition probability.  The Bayes-updated belief
%generally falls between grid points, so its probability mass is split
%linearly between the two neighboring grid points.

for i = 1 : nb %P(s = S_R)
    %Probability that the next belief is b2R
    P_b2R = OR * b(i) + OL * (1 - b(i));
    %Probability that the next belief is b2L
    P_b2L = OL * b(i) + OR * (1 - b(i));
    %Bayesian Belief Update
    if P_b2R ~= 0
        b2R = OR * b(i)/P_b2R; %The next possible belief when o_t+1 = O_R
    else
        b2R = 0;
    end
    if P_b2L ~=0
        b2L = OL * b(i)/P_b2L; %The next possible belief when o_t+1 = O_L
    else
        b2L = 0;
    end
    %Floors and ceils of b2R and b2L (1-based grid indices)
    jR_F = floor(b2R * (nb-1)) + 1;
    jR_C = ceil(b2R * (nb-1)) + 1;
    jL_F = floor(b2L * (nb-1)) + 1;
    jL_C = ceil(b2L * (nb-1)) + 1;
    %Weights on each grid point (linear interpolation: a_R is the
    %fractional distance of b2R above its floor grid point)
    a_R = b2R * (nb - 1) + 1 - jR_F;
    a_L = b2L * (nb - 1) + 1 - jL_F;
    T(i,jR_F) = T(i,jR_F) + P_b2R * (1 - a_R);
    T(i,jR_C) = T(i,jR_C) + P_b2R * a_R;
    
    T(i,jL_F) = T(i,jL_F) + P_b2L * (1 - a_L);
    T(i,jL_C) = T(i,jL_C) + P_b2L * a_L;
end

%Reward function if no sampling is allowed, or the value function when the
%threshold is set to 0.5
R = max(b * r_p + (1 - b) * r_n, b * r_n + ( 1 - b) * r_p);

%Compute the value function for different policies.
%d(k) places the decision threshold halfway between grid points, sweeping
%from just above 0.5 up to just below 1.
V = zeros(nb, (nb-1)/2);
d = 1.0 * (nb - 2) / (nb - 1) / 2 + 1.0 * [1:(nb-1)/2] / (nb -1);

for d_i = 1 : (nb-1)/2 %for all possilbe policy    
    m = true(nb,1);  %m(i) if i is a non-terminal state
    %state i is a terminal state if i is beyond the threshold set by the
    %policy
    m(b > d(d_i)) = false; 
    m(b < 1 - d(d_i)) = false;
    
    V(~m,d_i) = R(~m); %Receive reward if already in the terminal state
    %Solve the linear fixed-point equation
    %  V = r_s + T(m,m)*V + T(m,~m)*R(~m)'
    %directly, instead of iterating to convergence.
    V(m, d_i) = (eye(sum(m)) - T(m,m)) \ (T(m,~m) * R(~m)' + r_s);
    
%    Value iteration (reference implementation, kept for documentation)
%     tol = 1.0e-12;   
%     V0 = ones(nb,1);       
%     while max(abs(V0(m) - V(m,d_i))) > tol  
%         V0 = V(:,d_i);
% %         Value = immediate reward 
% %              + reward if it stays in non terminal states
% %              + value if it jumps to terminal states
%         V(m,d_i) = r_s + T(m,m) * V(m,d_i) + T(m,~m) * R(~m)';
% %         No necessary to compute if the value is greater than the value
% %         when the threshold is set at 0.5
%         if c ==0 && max(V(:,d_i) - R) >= 0
%              break;
%         end
%     end
end
  


