
% K = SUM_d a_d^2*KRON(K{1,d}, K{2,d}) + s^2*DIAG(W(:).^2)
%
% THETA = [A_1, THETA_11, THETA_21, A_2, THETA_12, THETA_22, ..., S]

% k = a^2 * k1 * k2 + s^2
%
% K = a^2*kron(K1,K2) + s^2*diag(W(:).^2)
%
% THIS VERSION: The noise variances may be given fixed scales

function [get_logpdf, get_dlogpdf, get_rand_model, func_theta] = ...
    gp_init_kron5(covfuncs, solvers, logprior, dlogprior, varargin)
% GP_INIT_KRON5 - Initialize a GP model whose covariance is a sum of
% Kronecker products plus scaled diagonal noise:
%   K = SUM_j a_j^2*kron(K{1,j},K{2,j}) + s^2*diag(W(:).^2)
%
% Inputs:
%   COVFUNCS  : 2 x D_sum cell array of covariance functions (exactly two
%               rows, one per Kronecker factor domain; checked below).
%               Called with no arguments, each returns its parameter count
%               and matrix dimensions; called with parameters, the Gram
%               matrix (and optionally its derivatives).
%   SOLVERS   : matching 2 x D_sum cell array of solver structs with
%               fields .decompose, .squareroot, .linsolve
%   LOGPRIOR  : function handle, log-prior density of the parameter vector
%   DLOGPRIOR : function handle, gradient of the log-prior
%   VARARGIN  : option/value pairs, see OPTIONS defaults below
%
% Outputs are function handles consumed by the sampling framework:
%   GET_LOGPDF, GET_DLOGPDF : build log-posterior / its gradient for a
%                             given covariance structure
%   GET_RAND_MODEL          : build the Gibbs-style sampling step
%   FUNC_THETA              : map parameter vector to covariance structure

% $$$ function [get_logpdf, get_dlogpdf, get_rand_model, func_theta] = ...
% $$$     gp_init_kron5(covfunc1, solver1, covfunc2, solver2, logprior, dlogprior, ...
% $$$                   varargin)


% Default options
options = struct( ...
    'samplefunc',  [], ...              % optional callback applied to each drawn sample
    'likelihood',  'whitened_prior', ... % parameterization used when sampling theta
    'noise_scale', [], ...              % fixed per-element noise scales W (empty = all ones)
    'rand_y',      'pcg'); % gibbs / pcg

% Parse arguments
[options, errmsg] = argparse( options, varargin{:} );
error(errmsg);  % no-op when errmsg is empty

% Number of Kronecker-product covariance functions in the sum
[D_kron, D_sum] = size(covfuncs);
if D_kron ~= 2
  error(['Supporting only products over two domains, that is, COVFUNCS ' ...
         'must have exactly two rows.'])
end

% Check the dimensionalities of the parameter vector for each covariance
% function and compute the indices for each in the joint parameter vector.
% Joint layout: THETA = [A_1, THETA_11, THETA_21, A_2, THETA_12, THETA_22, ..., S]
N = nan(D_kron,1);               % grid size per domain (from the covfuncs)
M = nan(D_kron,1);               % secondary size returned by the covfuncs
n_theta = nan(D_kron, D_sum);    % parameter count per covariance function
ind_theta = cell(D_kron, D_sum); % indices of each covfunc's parameters in theta
ind_a = nan(D_sum,1);            % indices of the scale parameters a_j
ind_s = NaN;                     % index of the noise scale s (assigned after loop)
iter = 1;
for j=1:D_sum
  ind_a(j) = iter;
  iter = iter + 1;
  for i=1:D_kron
    %[n_theta(i,j),~,~] = feval(covfuncs{i,j});
    % Calling a covfunc without parameters queries its parameter count
    % and the matrix dimensions.
    [n_theta(i,j),N(i),M(i)] = feval(covfuncs{i,j});
    ind_theta{i,j} = iter + (1:n_theta(i,j)) - 1;
    iter = iter + n_theta(i,j);
  end
end
ind_s = iter;  % the noise scale s is the last element of theta

% 2-D Kronecker hard coded here..
% W holds the fixed per-element noise scales as an N(2) x N(1) matrix.
if isempty(options.noise_scale)
  W = ones(N(2),N(1));
else
  W = reshape(options.noise_scale, N(2), N(1));
end

debug = false;

% Posterior density function: choose the likelihood evaluators.
% NOTE(review): the 'conditioned' handlers are referenced but not defined
% in this view of the file -- confirm they exist before selecting them.
switch options.likelihood
 case 'conditioned'
  get_loglikelihood = @get_loglikelihood_conditioned;
  get_dloglikelihood = @get_dloglikelihood_conditioned;
 case 'whitened_prior'
  get_loglikelihood = @get_loglikelihood_whitened_prior;
  get_dloglikelihood = @get_dloglikelihood_whitened_prior;
% $$$  case 'whitened_approximate'
% $$$   get_loglikelihood = @get_loglikelihood_whitened_approximate;
% $$$   get_dloglikelihood = @get_dloglikelihood_whitened_approximate;
 otherwise
  error('Unknown likelihood')
end

% Return the nested functions as the public interface.
get_logpdf = @get_logposterior;
get_dlogpdf = @get_dlogposterior;
get_rand_model = @get_randfunc;
func_theta = @covstructfunc;

  function logpost = get_logposterior(f_theta)
  % Return a handle that evaluates the (unnormalized) log-posterior for
  % the fixed hyperparameter configuration F_THETA.  The prior term is
  % computed once here; the likelihood evaluator is built per theta.
  log_prior = logprior(f_theta.theta);
  log_likelihood = get_loglikelihood(f_theta);
  logpost = @eval_logposterior;
    function lpost = eval_logposterior(varargin)
    % log-posterior = log-prior + log-likelihood (up to a constant)
    lpost = log_prior + log_likelihood(varargin{:});
    end
  end

  function dlogpost = get_dlogposterior(df_theta)
  % Return a handle that evaluates the gradient of the log-posterior for
  % the fixed hyperparameter configuration DF_THETA.
  dlog_prior = dlogprior(df_theta.theta);
  dlog_likelihood = get_dloglikelihood(df_theta);
  dlogpost = @eval_dlogposterior;
    function g = eval_dlogposterior(varargin)
    % gradient of posterior = gradient of prior + gradient of likelihood
    g = dlog_prior + dlog_likelihood(varargin{:});
    end
  end

  
  function func = get_randfunc(rand_theta, covstruct_init)
  % Build the MCMC step function.  The returned handle alternately draws
  % (Y, F) given the hyperparameters and the hyperparameters given a
  % transformed latent field.  F, covstruct and rand_y persist across
  % calls to the returned function (nested-function workspace).

  % Initialize latent variables
  F = [];

  covstruct = covstruct_init;
  
  % Choose the (Y,F) sampler.
  % NOTE(review): get_rand_y_gibbs is not defined in this view of the
  % file -- confirm it exists before using options.rand_y = 'gibbs'.
  switch options.rand_y
   case 'gibbs'
    get_rand_y = @get_rand_y_gibbs;
   case 'pcg'
    get_rand_y = @get_rand_y_pcg;
  end
  rand_y = get_rand_y(covstruct);

  func = @randfunc;
    function Y = randfunc(Y,Imv)
  
    if isempty(F)
      % First call: initialize (Y,F) with a draw under the initial theta.
      % Use some better initialization here..
      fprintf('Draw Y and F..\n');
      t = cputime();
      [Y, F] = rand_y(Y,Imv);
      %fprintf(' %f seconds\n', cputime()-t);
    end
    
    fprintf('Draw theta..\n');
    t = cputime();

    switch options.likelihood
      
     case 'conditioned'
      % Sample hyperparameters
      error('Not implemented?')
      % Unreachable while the error above stands.
      [~, covstruct_new] = rand_theta(Y, F);
      
     case 'whitened_prior'
      % Transform F to V which has a whitened prior
      U = cell(D_kron, D_sum);
      linsolver = cell(D_kron, D_sum);
      V = zeros([N(2),N(1),D_sum]);
      for j=1:D_sum
        for i=1:D_kron
          % Transposed square root of each factor's decomposition.
          U{i,j} = feval(solvers{i,j}.squareroot, covstruct.L{i,j})';
          linsolver{i,j} = @(x) feval(solvers{i,j}.linsolve, covstruct.L{i,j}, x);
        end
        % Compute chol(K)'\F by chol(K)*(K\F) in order to utilize the
        % linsolve-functions
        V(:,:,j) = ...
            kronprod(U{1,j}, ...
                     U{2,j}, ...
                     linsolve_kron(@(x1) linsolver{1,j}(x1), ...
                                   @(x2) linsolver{2,j}(x2), ...
                                   F(:,:,j))) / covstruct.a(j);
      end
% $$$       U1 = solver1.squareroot(covstruct.L1)';
% $$$       U2 = solver2.squareroot(covstruct.L2)';
% $$$       V = kronprod(U1, ...
% $$$                    U2, ...
% $$$                    linsolve_kron(@(x1) solver1.linsolve(covstruct.L1,x1), ...
% $$$                                  @(x2) solver2.linsolve(covstruct.L2,x2), ...
% $$$                                  F)) / covstruct.a;
      
      %disp('Whitened prior')
      
      % Sample hyperparameters
      [~, covstruct_new] = rand_theta(Y, V, Imv);

    end
    
    % Rebuild the (Y,F) sampler only when theta actually changed.
    if ~isequal(covstruct, covstruct_new)
      covstruct = covstruct_new;
      rand_y = get_rand_y(covstruct);
    end
    

    if debug
      disp('randfunc: Rand Y and F')
    end

    fprintf('Draw Y and F..\n');
    t = cputime();
    % Sample Y
    [Y, F] = rand_y(Y,Imv);
    %fprintf(' %f seconds\n', cputime()-t);
    
    % Process samples??
    if ~isempty(options.samplefunc)
      options.samplefunc(Y, F, covstruct.theta);
    end

    end

  end
  
  function [covstruct, dcovstruct] = covstructfunc(theta, varargin)
  % Map the parameter vector THETA to a covariance structure:
  %   covstruct.theta : full parameter vector
  %   covstruct.a     : scale parameters a_j (one per summand)
  %   covstruct.s     : noise scale
  %   covstruct.K     : Gram matrices K{i,j}
  %   covstruct.L     : their decompositions (via solvers{i,j}.decompose)
  % Called as covstructfunc(theta, ind, covstruct_old), only the factors
  % whose parameter indices appear in IND are recomputed.  The second
  % output DCOVSTRUCT additionally carries the derivative matrices dK.
  
% $$$   ind1 = 1+(1:n_theta1);
% $$$   ind2 = (1+n_theta1)+(1:n_theta2);
  
  % Split the joint parameter vector into per-covfunc slices.
  thetas = cell(D_kron, D_sum);
  for j=1:D_sum
    for i=1:D_kron
      thetas{i,j} = theta(ind_theta{i,j});
    end
  end
  
% $$$   theta1 = theta(ind1);
% $$$   theta2 = theta(ind2);
  
  if debug
    disp('covstructfunc: start')
  end

  if nargin == 1
    % Full recomputation of every factor.
    
    covstruct.theta = theta;
    covstruct.a = theta(ind_a);
    covstruct.s = theta(ind_s);
    
    %covfuncs
    %thetas
    if nargout <= 1
      K = cell(D_kron, D_sum);
      for j=1:D_sum
        for i=1:D_kron
          K{i,j} = feval(covfuncs{i,j}, thetas{i,j});
        end
      end
% $$$       K1 = covfunc1(theta1);
% $$$       K2 = covfunc2(theta2);
    else
      % Gradient requested: also collect derivative matrices dK.
      K = cell(D_kron, D_sum);
      dK = cell(D_kron, D_sum);
      for j=1:D_sum
        for i=1:D_kron
          [K{i,j},dK{i,j}] = feval(covfuncs{i,j}, thetas{i,j});
        end
      end
% $$$       [K1, dK1] = covfunc1(theta1);
% $$$       [K2, dK2] = covfunc2(theta2);
    end
    
    covstruct.K = K;
% $$$     covstruct.K1 = K1;
% $$$     covstruct.K2 = K2;
    
    % Decompose every Gram matrix for later solves / square roots.
    covstruct.L = cell(D_kron, D_sum);
    for j=1:D_sum
      for i=1:D_kron
        covstruct.L{i,j} = feval(solvers{i,j}.decompose, covstruct.K{i,j});
      end
    end

% $$$     covstruct.L1 = solver1.decompose(covstruct.K1);
% $$$     covstruct.L2 = solver2.decompose(covstruct.K2);
    
    if nargout >= 2
      % Derivative structure shares K/L with covstruct and adds dK.
      dcovstruct = covstruct;
      dcovstruct.dK = dK;
% $$$       dcovstruct.dK1 = dK1;
% $$$       dcovstruct.dK2 = dK2;
    end
    
  else
    % Partial update: start from COVSTRUCT_OLD and recompute only the
    % factors whose parameters changed.
    
    if nargout > 1
      error('Not implemented for getting the gradient');
    end
    
    ind = varargin{1};            % indices of the changed parameters
    covstruct_old = varargin{2};  % previous covariance structure
    
    covstruct = covstruct_old;
    
    covstruct.theta = theta;
    covstruct.a = theta(ind_a);
    covstruct.s = theta(ind_s);
    
    % Recompute K and L only where IND hits a factor's parameter block.
    for j=1:D_sum
      for i=1:D_kron
        if any(ismember(ind, ind_theta{i,j}))
          covstruct.K{i,j} = feval(covfuncs{i,j}, thetas{i,j});
          covstruct.L{i,j} = feval(solvers{i,j}.decompose, covstruct.K{i,j});
        end
      end
    end
    
% $$$     if any(ismember(ind, ind1))
% $$$       covstruct.K1 = covfunc1(theta1);
% $$$       covstruct.L1 = solver1.decompose(covstruct.K1);
% $$$     end
% $$$     if any(ismember(ind, ind2))
% $$$       covstruct.K2 = covfunc2(theta2);
% $$$       covstruct.L2 = solver2.decompose(covstruct.K2);
% $$$     end
    
  end

  if debug
    disp('covstructfunc: end')
  end

  end
  
  function func = get_rand_y_pcg(covstruct)
  % Build a sampler that draws the latent field F jointly with noisy data
  % using one conjugate-gradient solve against the full observation
  % covariance SUM_j a_j^2*kron(K{1,j},K{2,j}) + s^2*diag(W(:).^2).
  % Square roots are precomputed once per covariance structure.
  L = cell(D_kron, D_sum);
%  K = cell(D_kron,1);
%  K(:) = {0};
  for j=1:D_sum
    for i=1:D_kron
      L{i,j} = feval(solvers{i,j}.squareroot, covstruct.L{i,j});
%      K{i} = K{i} + covstruct.a(j)^2 * covstruct.K{i,j};
    end
  end
% $$$   L1 = solver1.squareroot(covstruct.L1);
% $$$   L2 = solver2.squareroot(covstruct.L2);
  
  % multiply_f = @(x) covstruct.a^2*kronprod(K{1}, K{2}, x);
% $$$   multiply_f = @(x) covstruct.a^2*kronprod(covstruct.K1, covstruct.K2, x);
  % Elementwise noise variances, hoisted out of the CG iterations.
  s2W2 = covstruct.s^2*W.^2;
  %multiply_y = @(x) (kronprod(K{1},K{2},x) + s2W2.*x);
  func = @rand_y_pcg;
% $$$   multiply_y = @(x) (multiply_f(x) + covstruct.s^2*W.^2.*x);
    function [Y,F] = rand_y_pcg(Y, Imv)
    % Draw F from the prior
    F = zeros([size(Y),D_sum]);
    for j=1:D_sum
      F(:,:,j) = covstruct.a(j) * gaussian_rand_kron(L{1,j},L{2,j});
    end
    % Draw Y (and subtract the observed matrix)
    Y0 = sum(F,3) + covstruct.s*W.*randn(size(Y)) - Y;
    % Solve CG once
    Z = conjgradmv(@multiply_y, ...
                   Y0, ...
                   ~Imv, ...
                   'maxiter', 1e3, ...
                   'tol', 1e-6, ...
                   'verbose', true);
    % Transform prior samples to posterior samples
    for j=1:D_sum
      F(:,:,j) = F(:,:,j) - covstruct.a(j)^2 * kronprod(covstruct.K{1,j}, ...
                                                        covstruct.K{2,j}, ...
                                                        Z);
    end
    % NOTE(review): Y is returned unchanged -- imputation of missing
    % entries (commented-out lines below) appears disabled; confirm.
    
% $$$       F = covstruct.a * gaussian_rand_kron(L1,L2);
% $$$     F = gaussian_rand_conjgradmv(multiply_f, ...
% $$$                                  multiply_y, ...
% $$$                                  F, ...
% $$$                                  F + covstruct.s*W.*randn(size(Y)) - Y, ...
% $$$                                  ~Imv, ...
% $$$                                  'maxiter', 1e3, ...
% $$$                                  'tol', 1e-6, ...
% $$$                                  'verbose', true);
    %sumF = sum(F,3);
    %Y(Imv) = sumF(Imv) + covstruct.s*W(Imv).*randn(size(Y(Imv)));
    %Y(Imv) = 0;
      function y = multiply_y(x)
      % Multiply matrix X by the full observation covariance matrix
      y = 0;
      for j=1:D_sum
        y = y + covstruct.a(j)^2 * kronprod(covstruct.K{1,j}, ...
                                            covstruct.K{2,j}, ...
                                            x);
      end
      y = y + s2W2.*x;
      end
    end
  end

  
  function func = get_loglikelihood_whitened_prior(covstruct)
  % Build the log-likelihood evaluator for the whitened parameterization.
  % The square roots of all factor decompositions are precomputed once
  % per covariance structure.
  sqrtL = cell(D_kron, D_sum);
  for col=1:D_sum
    for row=1:D_kron
      sqrtL{row,col} = feval(solvers{row,col}.squareroot, covstruct.L{row,col});
    end
  end
  func = @loglike_whitened;
    function x = loglike_whitened(Y, V, Imv)
    % Y   : data matrix, IMV marks its missing entries
    % V   : whitened latent variables, one slab V(:,:,j) per summand
    % Imv : logical mask of missing values in Y
    N_tot = sum(~Imv(:));  % number of observed entries
    % Log-determinant of the noise covariance over the observed entries;
    % each has standard deviation s*W.
    ldet_y = 2*N_tot*log(covstruct.s) + 2*sum(log(W(~Imv)));
    % Map whitened variables back to the latent field via the square roots.
    F = zeros(size(Y));
    for col=1:D_sum
      F = F + covstruct.a(col) * kronprod(sqrtL{1,col}, sqrtL{2,col}, V(:,:,col));
    end
    % Scaled residuals; missing entries contribute nothing to the sum.
    R = (Y - F) ./ W;
    R(Imv) = 0;
    x = gaussian_logpdf((R(:)'*R(:)) / (covstruct.s^2), ...
                        0, ...
                        0, ...
                        ldet_y, ...
                        N_tot);
    end
  end
  
  
end
