function [posterior, prior] = initialize_rsl(data, sigma2)
%
% Creates a prior distribution for the relevant subtask learning model (RSL)
% parameters, and an initial posterior approximation which is basically
% the same as the prior.
%
% Inputs: 'data' is a cell array, where each element is the training data for
% one task. The first task (data{1}) is the task of interest, the other tasks
% are supplementary tasks. 'sigma2' is a scalar telling the prior variance of
% parameters around a zero mean. 0.05 is a typical value for the variance.
%
% For each task, the training data must be a matrix where the number of rows
% is the number of training data samples (can be different in each task), and
% the number of columns depends on the number of features as explained below.
% Each row is one data sample, with n_features + 2 columns. The first n_features
% elements are the data features, the element in column "n_features+1" is a
% constant one (1), and the last element (in column "n_features+2") is a class
% label which is either +1 or -1.
%
% Outputs:
%   prior     - cell array: prior{1} = Sigma_U0, prior{2} = theta_U0, and for
%               each supplementary task S = 2..n_tasks the four entries
%               prior{(S-2)*4+3 .. (S-2)*4+6} hold Sigma_S0, theta_S0,
%               phi_SA0, phi_SB0 respectively.
%   posterior - cell array: posterior{1} = Sigma_U, posterior{2} = theta_U,
%               posterior{3} = zeros(1, n_samples_task1), and for each
%               supplementary task S the entries posterior{(S-2)*7+4 ..
%               (S-2)*7+10} hold Sigma_S, theta_S, phi_SA, phi_SB, gammas,
%               xis_S, xis_U respectively.
%
% Note that later, after learning, for test points RSL will predict the class
% probabilities of class +1, that is, the outputs will be probabilities
% p(class = +1|x) for each test point x. The probabilities of the other class
% are simply p(class = -1|x) = 1 - p(class = +1|x).
%

n_tasks = length(data);
X = data{1}';
n_dim = size(X,1)-1;  % n_features + 1 (features plus the constant-one column)

% initialize prior: zero-mean Gaussian with variance sigma2 for the weights,
% and unit shape parameters phi for the task-relevance distribution
Sigma_U0 = sigma2*eye(n_dim);
theta_U0 = zeros(n_dim,1);
prior{1} = Sigma_U0;
prior{2} = theta_U0;
for S=2:n_tasks,
  Sigma_S0 = sigma2*eye(n_dim);
  theta_S0 = zeros(n_dim,1);
  phi_SA0 = 1;
  phi_SB0 = 1;
  prior{(S-2)*4+3} = Sigma_S0;
  prior{(S-2)*4+4} = theta_S0;
  prior{(S-2)*4+5} = phi_SA0;
  prior{(S-2)*4+6} = phi_SB0;
end;

% initialize posterior: start from the prior values
Sigma_U = prior{1};
theta_U = prior{2};
n_S = size(X,2);  % number of samples in the task of interest
posterior{1} = Sigma_U;
posterior{2} = theta_U;
posterior{3} = zeros(1,n_S);

for S=2:n_tasks,
  X = data{S};
  cs = X(:,end)';      % class labels (+1/-1), one per sample
  X = X(:,1:end-1)';   % n_dim x n_samples, labels stripped
  n_S = size(X,2);     % number of samples in this supplementary task

  Sigma_S0 = prior{(S-2)*4+3};
  theta_S0 = prior{(S-2)*4+4};
  phi_SA0 = prior{(S-2)*4+5};
  phi_SB0 = prior{(S-2)*4+6};

  % update Sigma_S and theta_S (initially just the prior values)
  Sigma_S = Sigma_S0;
  theta_S = theta_S0;
  posterior{(S-2)*7+4} = Sigma_S;
  posterior{(S-2)*7+5} = theta_S;

  % update xis_S: per-sample variational parameters, xi_i = sqrt(x_i' * E[w w'] * x_i)
  xis_S = sum(X.*((Sigma_S+theta_S*theta_S')*X),1).^0.5;
  posterior{(S-2)*7+9} = xis_S;

  % update xis_U (same quantity under the task-of-interest parameters)
  xis_U = sum(X.*((Sigma_U+theta_U*theta_U')*X),1).^0.5;
  posterior{(S-2)*7+10} = xis_U;

  % update phi_SA
  phi_SA = phi_SA0;
  posterior{(S-2)*7+6} = phi_SA;

  % update phi_SB
  phi_SB = phi_SB0;
  posterior{(S-2)*7+7} = phi_SB;

  % compute weighing function g(\xi_{S,i,S}); entries with xi == 0 get the
  % limit value 1 directly, avoiding 0/0 in the general formula below
  gs_S = zeros(1, n_S);
  I = find(xis_S == 0);
  gs_S(I) = 1;
  I2 = find(xis_S ~= 0);
  gs_S(I2) = exp(-xis_S(I2));
  gs_S(I2) = 2*(1-gs_S(I2))./(xis_S(I2).*(1+gs_S(I2)));

  % compute weighing function g(\xi_{S,i,U})
  gs_U = zeros(1, n_S);
  I = find(xis_U == 0);
  gs_U(I) = 1;
  I2 = find(xis_U ~= 0);
  gs_U(I2) = exp(-xis_U(I2));
  gs_U(I2) = 2*(1-gs_U(I2))./(xis_U(I2).*(1+gs_U(I2)));

  % update gammas: per-sample responsibilities from the two variational bounds;
  % the shared maximum C is subtracted before exponentiation for numerical
  % stability (psi is the digamma function)
  hs1 = 0.5*cs.*(theta_S'*X) - 0.125*gs_S.*sum(X.*((Sigma_S+theta_S*theta_S')*X),1) - ...
        log(1+exp(-xis_S)) - 0.5*xis_S + 0.125*gs_S.*(xis_S.^2) + ...
        psi(phi_SB);

  hs2 = 0.5*cs.*(theta_U'*X) - 0.125*gs_U.*sum(X.*((Sigma_U+theta_U*theta_U')*X),1) - ...
        log(1+exp(-xis_U)) - 0.5*xis_U + 0.125*gs_U.*(xis_U.^2) + ...
        psi(phi_SA);
  C = max([max(hs1) max(hs2)]);
  gammas = exp(hs1-C)./(exp(hs1-C)+exp(hs2-C));
  posterior{(S-2)*7+8} = gammas;
end;
