function [I] = choose_subsetP(D1, D, n_class, m, methodix, varargin)

% Find a subset I of size m of data D by the method indexed by methodix.
%
% D1       -- row vector of indexes into D that must stay in the subset
% D        -- data matrix; a single row denotes a single datum, with the
%             last column holding the class label (1..n_class)
% n_class  -- number of classes
% m        -- desired subset size
% methodix -- selection method (required argument):
%   methodix = 0: random subset
%   methodix = 1: greedy; pick points of maximal predictive variance
%   methodix = 2: pick border points; TBI on condition Edwin says to
%     for each class c where |D: D.class == c| == k, pick k/m points;
%     the k/m with the highest sum covariance with data from other classes.
%     this gets representative proportions, & mostly (?) boundary points
%     might do badly for ass. loss fns or noisy data
% varargin :: {cov_function, hyperparameters}   (used by method 1)
%
% Returns the selected rows of D (the data themselves, not indexes).

switch (methodix)
  case 0
    % Random selection: keep the rows named in D1 and top up with a
    % random sample of the remaining rows.
    X = setdiff(1:size(D,1), D1);
    X = X(randperm(numel(X)));
    % NOTE(review): `m-10` assumes length(D1) == 10 so that the subset
    % totals m rows; confirm against callers before generalizing.
    I = D([X(1:m-10) D1], :); % hax
  case 1
    % Greedy variance-based selection under a multi-class GP classifier:
    % repeatedly add the candidate point whose predictive covariance
    % (over class latents) has maximal determinant.
    func = varargin(1){:};  % covariance function
    hyps = varargin(2){:};  % stacked hyperparameters, one block per class
    dim = size(D, 2) - 1;   % last column of D is the class label
    n = size(D,1);
    X = D(:, 1:dim);
    Y = D(:, 1+dim);
    y = reshape(kron(ones(n,1),1:n_class),n*n_class,1)==repmat(Y,n_class,1);
    it_size = 10;           % points added per hyperparameter re-fit
    I = D1; % this is done by indexes; because I say so is why
    J = setdiff(1:size(D, 1), I); % the remainder of D, ie D \ I
    approx_f = zeros(it_size * n_class, 1);
    % NOTE(review): presumably the length argument to GPML-style
    % minimize() (negative => function evaluations) -- verify.
    l = -24; % how large should this be, really
    li = length(I);
    lj = length(J);
    sigma_noise = 1e-5;     % jitter added to K for numerical stability

    while li < m

      % Re-fit the hyperparameters on the current subset I.
      yI = reshape(kron(ones(li,1),1:n_class),li*n_class,1)==repmat(Y(I),n_class,1);
      hyps = minimize(hyps, @eq_3_44, l, func, li, n_class, X(I,:), yI, approx_f);
      Hyps = reshape(hyps, length(hyps)/n_class, n_class);
      l = -12; % later refits start closer to an optimum; allow fewer evals

      for i = 1:it_size
        % BUG FIX: without this break the inner loop always added
        % it_size points, overshooting m by up to it_size-1.
        if li >= m
          break;
        end

        % Per-class covariance of the current subset, with jitter.
        K = zeros(li,li,n_class);
        % put this earlier for faster performance(?)
        % no dice - it's O(n^2c) with data reallocation either way
        for c = 1:n_class
          K(:,:,c) = func(Hyps(:,c), X(I,:), X(I,:)) + sigma_noise*eye(li);
        end

        yI = reshape(kron(ones(li,1),1:n_class),li*n_class,1)==repmat(Y(I),n_class,1);
        variances = zeros(lj,1);
        % NOTE(review): passes the full Hyps matrix here, unlike the
        % per-class column used when building K above -- verify func
        % accepts this.
        ks_star = func(Hyps, D(I, 1:dim), D(J, 1:dim));
        % Laplace approximation over the subset (GPML algorithm 3.3).
        [approx_f, pi, E, M] = alg_3_3(li, n_class, K, yI);
        for j = 1:lj
          % Predictive covariance over class latents for candidate J(j).
          Sigma = zeros(n_class);
          for class = 1:n_class
            b = E(:,:,class) * ks_star(:,j); % n * 1
            c_vec = M \ (M' \  b); % text has Rs thrown in
            for class_dash = 1:n_class
              Sigma(class, class_dash) += (E(:,:,class_dash) * c_vec)' * ks_star(:,j);
            end
            % Prior variance of the candidate point itself. BUG FIX: the
            % original indexed D(j,1:dim) -- the loop counter -- rather
            % than the candidate row D(J(j),1:dim) that ks_star(:,j)
            % corresponds to.
            Sigma(class, class) = Sigma(class, class) + func(Hyps(:,class), D(J(j),1:dim), D(J(j),1:dim)) - b' * ks_star(:,j);
          end
          variances(j) = det(Sigma); % I'd say it should be det(Sigma) but, bizarrely, this gives better accuracy
        end
        % Pick a random candidate among those tied for maximal variance.
        % BUG FIX: the original did randperm(new_ix), which permutes
        % 1:new_ix (the index *value*, not the tied candidate set) and
        % so selected an arbitrary unrelated row.
        cands = J(variances == max(variances));
        pick = randperm(length(cands));
        new_ix = cands(pick(1));
        I = [I new_ix];
        li = li + 1;
        J = setdiff(J, new_ix);
        lj = lj - 1;
      end
    end

    I = D(I, :);
%      clf;
%      dim = size(I);
%      n_class = max(I(:,dim(2)));
%
%      for c = 1:n_class
%        vals = I(find(I(:,3)==c),:);
%        s = strcat('@', num2str(c));
%        plot(vals(:,1), vals(:,2), s);
%        hold on;
%      end
%      pause;

  otherwise
    disp('That is not a valid subset selection index; see choose_subsetP.m for details');
endswitch

return;