% function ms= vary_algorithm_gold(domain, init_policy, training, testing)
% trains different evaluator algorithms on the training data, tests them on the
% testing data, and returns their errors as the result. 
% the error is computed against qgold which is computed by doing rollout. 
% the evaluators are: cmac, rbf and perfect (look at plearned if you want
% to add/remove evaluators). 
%------------------------------------------------------------------------

function ms= vary_algorithm_gold(domain, init_policy, training, testing)
% VARY_ALGORITHM_GOLD Train several evaluators on the training data and
% score each one against a rollout-based gold standard on the testing data.
%
% Inputs:
%   domain      - domain name; used to build the simulator name
%                 '<domain>_simulator' and the basis function names
%                 'basis_<domain>_<cmac|rbf|perfect>'
%   init_policy - policy struct; .discount is read here, the rest is passed
%                 through to lsq/justexec/Qvalue
%   training    - training samples, forwarded unchanged to lsq
%   testing     - testing samples; the LAST column is the action taken in
%                 each state, the remaining columns are the state
%
% Output:
%   ms - one row [mean variance] of |qgold - qlearned| per evaluator
%        (row order: cmac, rbf, perfect)
%
% Globals:
%   qgold    - gold-standard Q values, computed by Monte-Carlo rollout only
%              when the global is empty so callers can cache it across runs
%   plearned - learned policy per evaluator
%   qlearned - predicted Q values per evaluator on the testing set

% split off the action column; the remaining columns form the state
lastCol = size(testing,2); 
tsActions = testing(:,lastCol); 
testing = testing(:,1:(lastCol-1)); 
clear lastCol

%compute gold standard: mean discounted return over TRIALS rollouts
global qgold 
if (isempty(qgold))         %IMPORTANT: we compute qgold only if the global qgold variable is empty
    TRIALS = 5; 
    s = size(testing, 1); 
    qgold = zeros(s,1); 
    sim = strcat(domain, '_simulator'); 
    rand('twister', sum(100*clock));    % seed the legacy RNG from the clock
    for i=1:s
        vals = zeros(TRIALS,1); 
        for j=1:TRIALS
            % take the recorded action once, then roll out init_policy for
            % up to 100 steps; b is the discounted return of the rollout
            [nextst rew] = feval(sim, testing(i,:)', tsActions(i)); 
            [a b] = justexec(nextst, sim, init_policy, 100); 
            vals(j) = rew + init_policy.discount*b; 
        end
        % NOTE: the original also accumulated each trial into qgold(i) and
        % divided by TRIALS afterwards -- that duplicate bookkeeping is
        % exactly mean(vals), so compute it once here.
        qgold(i) = mean(vals); 
    end
    
end


% train one evaluator per basis; the three setups differ only in the
% basis-name suffix, so build them in a single loop
global plearned qlearned
plearned = {}; 
qlearned = {}; 

bases = {'cmac', 'rbf', 'perfect'}; 
for k=1:numel(bases)
    plearned{k} = init_policy; 
    plearned{k}.basis = strcat('basis_', domain, '_', bases{k}); 
    plearned{k}.weights = zeros(feval(plearned{k}.basis),1); 
    plearned{k}.weights = lsq(training, init_policy, plearned{k}); 
end


% score every evaluator: mean and variance of |qgold - qlearned|
ms = []; 
for i=1: size(plearned,2)
    qlearned{i} = zeros(size(testing,1),1); 
    for j=1:size(testing,1)
        a = tsActions(j); 
        qlearned{i}(j) = Qvalue(testing(j,:)', a, plearned{i});        
    end   
    err = abs( qgold - qlearned{i});    % 'err', not 'diff': avoid shadowing the builtin diff()
    m = mean(err); 
    v = var( err); 
    ms = [ms; m v]; 
end


end

