function [f, pi, E, M, loglik] = alg_3_3(n, n_class, K, y, approxF)
% ALG_3_3  Mode finding for the multiclass Laplace approximation.
% Implements Algorithm 3.3 of Rasmussen & Williams, "Gaussian Processes
% for Machine Learning" (Newton iteration for the posterior mode of a
% multiclass GP classifier).
% Originally by Mark Norrish, 2011.
%
% Inputs:
%   n       - number of training points
%   n_class - number of classes
%   K       - n x n x n_class array of per-class covariance matrices
%   y       - (n*n_class) x 1 stacked one-of-k target vector
%   approxF - (optional) initial guess for f; anything with n*n_class
%             elements is accepted and flattened
%
% Outputs:
%   f      - (n*n_class) x 1 posterior mode, of form [f1; f2; ...; f_n_class]
%   pi     - n x n_class softmax class probabilities at the mode
%   E      - n x n x n_class array: E_c = sqrtDc*(I+sqrtDc*Kc*sqrtDc)^-1*sqrtDc
%   M      - lower-triangular Cholesky factor of sum_c E(:,:,c)
%   loglik - approximate log marginal likelihood at the mode

% initialise stuff
E = zeros(n, n, n_class); % a 3D array rather than a block-diagonal matrix
R = repmat(eye(n), n_class, 1); % R'*v sums the n_class blocks of v
if nargin <= 4
  f = zeros(n_class*n, 1);
else
  % Take an initial guess; this usually shaves off a few iterations.
  % BUG FIX: was resize(), which is not a base-MATLAB function (and the
  % modern resize() pads/trims rather than flattens); reshape() performs
  % the intended flattening of the supplied guess into a column vector.
  f = reshape(approxF, n*n_class, 1);
end
obj = 1; % the objective function value
objold = 0;
test = 1; % do we still need to loop
epsilon = 1e-7 * n;
Pi = zeros(n_class*n,n);
sigma_noise = 1e-5; % jitter added to the diagonal for numerical stability
c_vec = zeros(n*n_class, 1);
while test
  % convergence is judged on the previous iteration's objective change,
  % so one extra iteration runs after the tolerance is met
  test = (abs(obj - objold) > epsilon);
  objold = obj;
  F = reshape(f, n, n_class);
  %compute pi: columnwise softmax over classes
  expsum = sum(exp(F'));
  pi = (exp(F')./repmat(expsum,n_class,1))';
  
  %compute Pi (stacked diagonal matrices of the per-class probabilities)
  for i = 1:n_class
    Pi((i-1)*n+1:i*n,:) = diag(pi(:,i));
  end

  z = 0;
  M = zeros(n);
  for c = 1:n_class
    sqrtDc = diag(sqrt(pi(:,c)));
    % Cholesky of (I + sqrtDc*Kc*sqrtDc), with sigma_noise jitter
    L = chol((1 + sigma_noise)*eye(n) + sqrtDc * K(:,:,c) * sqrtDc)';
    E(:,:,c) = sqrtDc * ( L' \ (L \ sqrtDc) );
    M = M + E(:,:,c);
    z = z + sum(log(diag(L))); % only needed for computing log determinant
  end
  pi_vector = reshape(pi,n_class*n,1);
  M = chol(M)'; % lower-triangular factor of sum_c E_c
  D = diag(pi_vector);
  W = D - Pi * Pi'; % Hessian of the (negative) log likelihood
  b = W * f + y - pi_vector ; % (n*c,1)
  for c = 1:n_class % yes this is where the more-readable theory falls down
    c_vec(1 + (c-1)*n:c*n) = E(:,:,c) * K(:,:,c) * b(1 + (c-1)*n:c*n);
  end
  % Newton direction via the matrix inversion lemma;
  % this reshape only works given E(:,:,c) is symmetric
  a = b - c_vec + reshape(E, n, n*n_class)' * (M' \ (M \ (R' * c_vec)));
  for c = 1:n_class
    f(1+(c-1)*n : c*n) = K(:,:,c) * a(1 + (c-1)*n:c*n);
  end
  
  % objective: -0.5*a'*f + y'*f - sum_i log(sum_c exp(f_i^c))
  obj = (y - 0.5*a)'*f - sum(log(sum(exp(reshape(f,n,n_class)'))));
end


% approximate log marginal likelihood; z carries the log-determinant term
loglik = -0.5 * a' * f + y' * f - sum(log(sum(exp(F')))) - z;