function [] = CVP(subsize)
% CVP  k-fold cross-validation of multi-class GP classification, comparing a
% random training-subset selection against the "smart" selection made by
% choose_subsetP.  Writes the correct-prediction counts for both strategies
% to 'cv_results' and the selected points to 'points'.
%
% subsize (optional): cap on the number of training points used per fold.
%                     If omitted, the full training portion of each fold is used.
%
% Relies on project-local functions/files: gp_data, gp_hyps, choose_subsetP,
% covSEard, minimize, eq_3_44, alg_3_3, alg_3_4 (GPML-style; Rasmussen &
% Williams Algorithms 3.3 / 3.4).

D = load('gp_data');
D = D(randperm(size(D,1)),:);     % shuffle rows so the folds are random trials
folds = 3;                        % or whatever
n_rows = size(D,1);
size_fold = floor(n_rows/folds);  % test points per fold
n_learn = (folds-1)*size_fold;    % training points per fold
if nargin > 0
  n_appr = min(n_learn, subsize); % approximation-subset size
else
  n_appr = n_learn;
end
ntot = n_learn + size_fold;       % total data actually used
D = D(1:ntot,:);                  % throw away data if need be
dim = size(D,2) - 1;              % input dimensionality; last column is the class label
n_class = max(D(:,dim+1));

% Initial argument for minimize().  Magical: -24 because it looks pretty good.
% Question: this becomes unhappy and throws divide-by-zero warnings if l is
% outside a small range, e.g. l = 5 or l = -15.  Why?
% At least I don't *think* this is strictly a foxtrot uniform.
l = -24;

func = @covSEard;
Hyps = load('gp_hyps');           % reference hyperparameters (right size, and "truth")
hyps = zeros(1, numel(Hyps));     % flat hyperparameter vector for minimize()
htrue = Hyps;
hyp_sq_error = 0;

% correct(1): random-subset hits; correct(2): smart-subset hits.
% (Vector instead of the Octave-only chained assignment `a = b = 0`.)
correct = [0, 0];
% Three columns per strategy per fold.  Sized by n_appr (not subsize, which
% is undefined when nargin == 0 and may exceed n_learn otherwise).
selections = zeros(n_appr, folds*6);

approxF = zeros(n_appr*n_class, 1);  % warm-started posterior mode across runs

for fold_number = 1:folds

  test_range = (fold_number-1)*size_fold+1 : fold_number*size_fold;
  xs_star = D(test_range, 1:dim);    % test points
  ys_star = D(test_range, 1+dim);    % their labels

  learn_set = [D(1:(fold_number-1)*size_fold,:); D(1+fold_number*size_fold:ntot,:)];
  perm = randperm(n_learn);
  I_init = perm(1:10);               % common seed indices for both strategies

  % strategy 0 = random subset, strategy 1 = "smart" subset; identical
  % evaluation pipeline for both, so run it in a loop.
  for strategy = 0:1
    D_appr = choose_subsetP(I_init, learn_set, n_class, n_appr, strategy, func, hyps);

    % Record the chosen points: columns 6f-5..6f-3 (random), 6f-2..6f (smart).
    col_hi = 6*fold_number - 3*(1-strategy);
    % NOTE(review): original code assigned D_appr(:,1:2) into a 3-column span,
    % which is an unconditional size mismatch; storing inputs + label (1:3),
    % matching the 3-columns-per-strategy layout, assumed intended — confirm.
    selections(:, col_hi-2:col_hi) = D_appr(:,1:3);

    X = D_appr(:, 1:dim);
    Y = D_appr(:, 1+dim);
    % One-vs-all 0/1 targets, stacked class-by-class into one column vector.
    y = reshape(kron(ones(n_appr,1), 1:n_class), n_appr*n_class, 1) == repmat(Y, n_class, 1);

    % hyps is deliberately NOT reset between runs: warm-starting makes the
    % next optimisation converge faster.
    hyps = minimize(hyps, @eq_3_44, l, func, n_appr, n_class, X, y, approxF);
    l = -12; % see? (shorter run once warm-started)
    Hyps = reshape(hyps, length(hyps)/n_class, n_class);

    hyp_sq_error = hyp_sq_error + sum(sum((Hyps - htrue) .^ 2));

    % Per-class train and train/test covariances.
    % this can't be further vectorised, can it?
    sigma_noise = 1e-5;  % jitter on the diagonal for numerical stability
    K = zeros(n_appr, n_appr, n_class);
    ks_star = zeros(n_appr, n_class, size_fold);
    for c = 1:n_class
      K(:,:,c) = func(Hyps(:,c), X, X) + sigma_noise*eye(n_appr);
      ks_star(:,c,:) = func(Hyps(:,c), X, xs_star);
    end

    %fprintf('Computing f and pi of iteration #%i...\n', fold_number);
    % Alg. 3.3 mode finding; approxF warm-starts the next call.  Alg. 3.4
    % then predicts on the held-out fold — note its lines 1-7 are already
    % done inside alg_3_3; this is whence the test/test:= construct there,
    % rather than a generic for loop.  (post_pi avoids shadowing builtin pi.)
    [approxF, post_pi, E, M] = alg_3_3(n_appr, n_class, K, y, approxF);

    correct(strategy+1) = correct(strategy+1) + ...
        alg_3_4(E, func, Hyps, ks_star, M, n_appr, n_class, ...
                reshape(post_pi, n_appr, n_class), size_fold, xs_star, ...
                reshape(y, n_appr, n_class), ys_star);
  end

end
%fprintf('I made %i correct predictions, out of a total of %i.\n', sum(correct), ntot);
%fprintf('This is %f percent accuracy.\n', 100*sum(correct)/ntot);
dlmwrite('cv_results', correct);   % [random hits, smart hits], as before
dlmwrite('points', selections);
%dlmwrite('learnt_hyps_error', hyp_sq_error/(folds * length(htrue)));