function y = vl_nnloss(x,c, dzdy, varargin)
%VL_NNLOSS CNN categorical or attribute loss.
%   Y = VL_NNLOSS(X, C) computes the loss incurred by the prediction
%   scores X given the categorical labels C.
%
%   The prediction scores X are organised as a field of prediction
%   vectors, represented by a H x W x D x N array. The first two
%   dimensions, H and W, are spatial and correspond to the height and
%   width of the field; the third dimension D is the number of
%   categories or classes; finally, the dimension N is the number of
%   data items (images) packed in the array.
%
%   While often one has H = W = 1, the case W, H > 1 is useful in
%   dense labelling problems such as image segmentation. In the latter
%   case, the loss is summed across pixels (contributions can be
%   weighed using the `InstanceWeights` option described below).
%
%   The array C contains the categorical labels. In the simplest case,
%   C is an array of integers in the range [1, D] with N elements
%   specifying one label for each of the N images. If H, W > 1, the
%   same label is implicitly applied to all spatial locations.
%
%   In the second form, C has dimension H x W x 1 x N and specifies a
%   categorical label for each spatial location.
%
%   In the third form, C has dimension H x W x D x N and specifies
%   attributes rather than categories. Here elements in C are either
%   `+1` or `-1`, where +1 denotes that an attribute is present and
%   `-1` that it is not. The key difference is that multiple attributes
%   can be active at the same time, while categories are mutually
%   exclusive. By default, the loss is *summed* across attributes
%   (unless otherwise specified using the `InstanceWeights` option
%   described below).
%
%   DZDX = VL_NNLOSS(X, C, DZDY) computes the derivative of the block
%   projected onto the output derivative DZDY. DZDX and DZDY have the
%   same dimensions as X and Y respectively.
%
%   VL_NNLOSS() supports several loss functions, which can be selected
%   by using the option `type` described below. When each scalar c in
%   C is interpreted as a categorical label (first two forms above),
%   the following losses can be used:
%
%   Classification error:: `classerror`
%     L(X,c) = (argmax_q X(q) ~= c). Note that the classification
%     error derivative is flat; therefore this loss is useful for
%     assessment, but not for training a model.
%
%   Top-K classification error:: `topkerror`
%     L(X,c) = (rank X(c) in X <= K). The top rank is the one with
%     highest score. For K=1, this is the same as the
%     classification error. K is controlled by the `topK` option.
%
%   Log loss:: `log`
%     L(X,c) = - log(X(c)). This function assumes that X(c) is the
%     predicted probability of class c (hence the vector X must be non
%     negative and sum to one).
%
%   Softmax log loss (multinomial logistic loss):: `softmaxlog`
%     L(X,c) = - log(P(c)) where P(c) = exp(X(c)) / sum_q exp(X(q)).
%     This is the same as the `log` loss, but renormalizes the
%     predictions using the softmax function.
%
%   Multiclass hinge loss:: `mhinge`
%     L(X,c) = max{0, 1 - X(c)}. This function assumes that X(c) is
%     the score margin for class c against the other classes.  See
%     also the `mmhinge` loss below.
%
%   Multiclass structured hinge loss:: `mshinge`
%     L(X,c) = max{0, 1 - M(c)} where M(c) = X(c) - max_{q ~= c}
%     X(q). This is the same as the `mhinge` loss, but computes the
%     margin between the prediction scores first. This is also known
%     the Crammer-Singer loss, an example of a structured prediction
%     loss.
%
%   When C is a vector of binary attribures c in (+1,-1), each scalar
%   prediction score x is interpreted as voting for the presence or
%   absence of a particular attribute. The following losses can be
%   used:
%
%   Binary classification error:: `binaryerror`
%     L(x,c) = (sign(x - t) ~= c). t is a threshold that can be
%     specified using the `threshold` option and defaults to zero. If
%     x is a probability, it should be set to 0.5.
%
%   Binary log loss:: `binarylog`
%     L(x,c) = - log(c(x-0.5) + 0.5). x is assumed to be the
%     probability that the attribute is active (c=+1). Hence x must be
%     a number in the range [0,1]. This is the binary version of the
%     `log` loss.
%
%   Logistic log loss:: `logistic`
%     L(x,c) = log(1 + exp(- cx)). This is the same as the `binarylog`
%     loss, but implicitly normalizes the score x into a probability
%     using the logistic (sigmoid) function: p = sigmoid(x) = 1 / (1 +
%     exp(-x)). This is also equivalent to `softmaxlog` loss where
%     class c=+1 is assigned score x and class c=-1 is assigned score
%     0.
%
%   Hinge loss:: `hinge`
%     L(x,c) = max{0, 1 - cx}. This is the standard hinge loss for
%     binary classification. This is equivalent to the `mshinge` loss
%     if class c=+1 is assigned score x and class c=-1 is assigned
%     score 0.
%
%   In addition to the standard losses above, this modified version
%   supports experimental variants: `euclidean`, `mshingesquared`,
%   `mshingecubed` and the `*_relevance` versions of several losses.
%   The `*_relevance` variants scale the per-instance loss by a
%   class-relevance factor looked up as `pd_model(c)`, where `pd_model`
%   is a vector indexed by class label, supplied through the `pd_model`
%   option. NOTE(review): these variants reshape the relevance lookup
%   to 1 x 1 x 1 x N, so they assume H = W = 1 (one label per image) --
%   confirm before using them for dense labelling.
%
%   VL_NNLOSS(...,'OPT', VALUE, ...) supports these additional
%   options:
%
%   InstanceWeights:: []
%     Allows to weight the loss as L'(x,c) = WGT L(x,c), where WGT is
%     a per-instance weight extracted from the array
%     `InstanceWeights`. For categorical losses, this is either a H x
%     W x 1 or a H x W x 1 x N array. For attribute losses, this is
%     either a H x W x D or a H x W x D x N array.
%
%   TopK:: 5
%     Top-K value for the top-K error. Note that K should not
%     exceed the number of labels.
%
%   See also: VL_NNSOFTMAX().

% Copyright (C) 2014-15 Andrea Vedaldi.
% Copyright (C) 2016 Karel Lenc.
% All rights reserved.
%
% This file is part of the VLFeat library and is made available under
% the terms of the BSD license (see the COPYING file).

% Training-harness options: ignored here, but declared so that a whole
% harness option struct can be forwarded through varargin without
% vl_argparse complaining.
opts.conserveMemory = false ;
opts.sync = false ;
opts.mode = 'normal' ;
opts.accumulate = false ;
opts.cudnn = true ;
opts.backPropDepth = +inf ;
opts.skipForward = false ;
opts.parameterServer = [] ;
opts.holdOn = false ;
% Loss options proper.
opts.pd_model = [] ;        % class-relevance lookup vector for *_relevance losses
opts.loss = 'softmaxlog' ;
opts.instanceWeights = [] ;
opts.classWeights = [] ;    % declared but currently unused
opts.threshold = 0 ;        % decision threshold for `binaryerror`
opts.normalise = false ;    % if true, divide the loss/derivative by the batch size
opts.topK = 5 ;
% FIX: every default must be assigned *before* parsing. Previously
% instanceWeights/classWeights/threshold/normalise/topK were (re)set
% after vl_argparse, silently discarding any caller-supplied values for
% the documented options.
opts = vl_argparse(opts, varargin, 'merge') ;

inputSize = [size(x,1) size(x,2) size(x,3) size(x,4)] ;

% Form 1: C has one label per image. In this case, get C in form 2 or
% form 3.
c = gather(c) ;
if numel(c) == inputSize(4)
  c = reshape(c, [1 1 1 inputSize(4)]) ;
  c = repmat(c, inputSize(1:2)) ;
end

% A label of 0 marks an instance that must be excluded from the loss.
hasIgnoreLabel = any(c(:) == 0) ;

% --------------------------------------------------------------------
% Spatial weighting
% --------------------------------------------------------------------

% work around a bug in MATLAB, where native cast() would slow
% progressively
if isa(x, 'gpuArray')
  switch classUnderlying(x) ;
    case 'single', cast = @(z) single(z) ;
    case 'double', cast = @(z) double(z) ;
  end
else
  switch class(x)
    case 'single', cast = @(z) single(z) ;
    case 'double', cast = @(z) double(z) ;
  end
end

labelSize = [size(c,1) size(c,2) size(c,3) size(c,4)] ;
assert(isequal(labelSize(1:2), inputSize(1:2))) ;
assert(labelSize(4) == inputSize(4)) ;
instanceWeights = [] ;
switch lower(opts.loss)
  case {'classerror', 'topkerror', 'log', 'softmaxlog', ...
        'softmaxlog_relevance', 'mhinge', 'mshinge_relevance', ...
        'mshinge', 'mshingesquared', 'mshingesquared_relevance', ...
        'euclidean', 'euclidean_relevance_prod', ...
        'euclidean_relevance_prodsup1', 'euclidean_relevance_sum', ...
        'mshingecubed', 'mshingecubed_relevance'}
    % there must be one categorical label per prediction vector
    assert(labelSize(3) == 1) ;

    if hasIgnoreLabel
      % null labels denote instances that should be skipped
      instanceWeights = cast(c(:,:,1,:) ~= 0) ;
    end

  case {'binaryerror', 'binarylog', 'logistic', 'hinge'}

    % there must be one categorical label per prediction scalar
    assert(labelSize(3) == inputSize(3)) ;

    if hasIgnoreLabel
      % null labels denote instances that should be skipped
      instanceWeights = cast(c ~= 0) ;
    end

  otherwise
    error('Unknown loss ''%s''.', opts.loss) ;
end

if ~isempty(opts.instanceWeights)
  % important: this code needs to broadcast opts.instanceWeights to
  % an array of the same size as c
  if isempty(instanceWeights)
    instanceWeights = bsxfun(@times, onesLike(x, size(c)), opts.instanceWeights) ;
  else
    instanceWeights = bsxfun(@times, instanceWeights, opts.instanceWeights) ;
  end
end

% --------------------------------------------------------------------
% Do the work
% --------------------------------------------------------------------

switch lower(opts.loss)
  case {'log', 'softmaxlog', 'softmaxlog_relevance', 'mhinge', ...
        'mshinge_relevance', 'mshinge', 'mshingesquared', ...
        'mshingesquared_relevance', 'euclidean', ...
        'euclidean_relevance_prod', 'euclidean_relevance_prodsup1', ...
        'euclidean_relevance_sum', 'mshingecubed', 'mshingecubed_relevance'}
    % from category labels to linear indexes into X
    numPixelsPerImage = prod(inputSize(1:2)) ;
    numPixels = numPixelsPerImage * inputSize(4) ;
    imageVolume = numPixelsPerImage * inputSize(3) ;

    n = reshape(0:numPixels-1,labelSize) ;
    offset = 1 + mod(n, numPixelsPerImage) + ...
             imageVolume * fix(n / numPixelsPerImage) ;

    % use uint64 indexes to prevent loss of precision on the + operator
    % (ignore labels c == 0 saturate uint64(c-1) to 0, i.e. the class-1
    % slot; this is harmless because instanceWeights zeroes those
    % instances)
    ci = uint64(offset) + numPixelsPerImage * uint64(c - 1) ;
end

if isempty(dzdy)
  % ------------------------------------------------------------------
  % Forward pass: Y is the (scalar) loss.
  % ------------------------------------------------------------------
  switch lower(opts.loss)
    case 'classerror'
      [~,chat] = max(x,[],3) ;
      t_ci = cast(c ~= chat) ;
    case 'topkerror'
      [~,predictions] = sort(x,3,'descend') ;
      t_ci = 1 - sum(bsxfun(@eq, c, predictions(:,:,1:opts.topK,:)), 3) ;
    case 'log'
      t_ci = - log(x(ci)) ;
    case 'softmaxlog'
      % subtract the per-pixel maximum for numerical stability
      Xmax = max(x,[],3) ;
      ex = exp(bsxfun(@minus, x, Xmax)) ;
      t_ci = Xmax + log(sum(ex,3)) - x(ci) ;
    case 'softmaxlog_relevance'
      % softmaxlog scaled by (1 + 2 * relevance of the true class)
      Xmax = max(x,[],3) ;
      ex = exp(bsxfun(@minus, x, Xmax)) ;
      % per-image relevance of the true label: 1 x 1 x 1 x N
      relevance_c = reshape(opts.pd_model(c), 1,1,1,[]) ;
      t_ci = (Xmax + log(sum(ex,3)) - x(ci)) .* (1 + 2.* relevance_c) ;
    case 'mhinge'
      t_ci = max(0, 1 - x(ci)) ;
    case 'mshinge'
      % margin against the best wrong class
      Q = x ;
      Q(ci) = -inf ;
      t_ci = max(0, 1 - x(ci) + max(Q,[],3)) ;
    case 'mshinge_relevance'
      Q = x ;
      Q(ci) = -inf ;
      relevance_c = reshape(opts.pd_model(c), 1,1,1,[]) ;
      t_ci = max(0, (1 + 2.* relevance_c) .* (1 - x(ci) + max(Q,[],3))) ;
    case 'mshingesquared'
      Q = x ;
      Q(ci) = -inf ;
      t_ci = max(0, (1 - x(ci) + max(Q,[],3)).^2) ;
    case 'mshingesquared_relevance'
      Q = x ;
      Q(ci) = -inf ;
      relevance_c = reshape(opts.pd_model(c), 1,1,1,[]) ;
      t_ci = max(0, (1 + 2.* relevance_c) .* (1 - x(ci) + max(Q,[],3)).^2 ) ;
    case 'mshingecubed'
      Q = x ;
      Q(ci) = -inf ;
      t_ci = max(0, (1 - x(ci) + max(Q,[],3)).^3) ;
    case 'mshingecubed_relevance'
      Q = x ;
      Q(ci) = -inf ;
      relevance_c = reshape(opts.pd_model(c), 1,1,1,[]) ;
      t_ci = max(0, (1 + 50.* relevance_c) .* (1 - x(ci) + max(Q,[],3)).^3 ) ;
    case 'euclidean'
      % squared error against a one-hot target: residual is x for wrong
      % classes and (x - 1) for the true class
      x(ci) = x(ci) - 1 ;
      t_ci = (x).^2 ;
    case 'euclidean_relevance_prod'
      relevance_c = reshape(opts.pd_model(c), 1,1,1,[]) ;
      x(ci) = x(ci) - 1 ;
      % weight 1/(1-relevance) follows Alejo et al. (2007)
      t_ci = 1./(1-relevance_c) .* (x).^2 ;
    case 'euclidean_relevance_prodsup1'
      relevance_c = reshape(opts.pd_model(c), 1,1,1,[]) ;
      x(ci) = x(ci) - 1 ;
      % 0 < relevance < 1, so scale it up to magnify the weighting
      t_ci = (1+4.*relevance_c) .* (x).^2 ;
    case 'euclidean_relevance_sum'
      relevance_c = reshape(opts.pd_model(c), 1,1,1,[]) ;
      x(ci) = x(ci) - 1 ;
      t_ci = (1+2.*relevance_c) .* (x).^2 ;
    case 'binaryerror'
      t_ci = cast(sign(x - opts.threshold) ~= c) ;
    case 'binarylog'
      t_ci = -log(c.*(x-0.5) + 0.5) ;
    case 'logistic'
      % t = log(1 + exp(-c.*x)), computed in a numerically stable way
      a = -c.*x ;
      b = max(0, a) ;
      t_ci = b + log(exp(-b) + exp(a-b)) ;
    case 'hinge'
      t_ci = max(0, 1 - c.*x) ;
  end
  if ~isempty(instanceWeights)
    % weighted sum over all instances
    y = instanceWeights(:)' * t_ci(:) ;
  else
    % FIX: removed a debugging try/catch that swallowed errors here and
    % left Y undefined
    y = sum(t_ci(:)) ;
  end

  if opts.normalise && numel(size(x)) == 4
    y = y ./ size(x,4) ;
  end

else
  % ------------------------------------------------------------------
  % Backward pass: Y is DZDX, the derivative projected onto DZDY.
  % ------------------------------------------------------------------
  if ~isempty(instanceWeights)
    % FIX: elementwise product; the previous matrix product '*' only
    % worked by accident for scalar DZDY
    dzdy = dzdy .* instanceWeights ;
  end
  switch lower(opts.loss)
    case {'classerror', 'topkerror'}
      % these losses have a flat derivative
      y = zerosLike(x) ;
    case 'log'
      y = zerosLike(x) ;
      y(ci) = - dzdy ./ max(x(ci), 1e-8) ;
    case 'softmaxlog'
      Xmax = max(x,[],3) ;
      ex = exp(bsxfun(@minus, x, Xmax)) ;
      y = bsxfun(@rdivide, ex, sum(ex,3)) ;
      y(ci) = y(ci) - 1 ;
      y = bsxfun(@times, dzdy, y) ;
    case 'softmaxlog_relevance'
      Xmax = max(x,[],3) ;
      ex = exp(bsxfun(@minus, x, Xmax)) ;
      y = bsxfun(@rdivide, ex, sum(ex,3)) ;
      y(ci) = y(ci) - 1 ;
      % scale the gradient of every output neuron by the relevance of
      % its own class (1 x 1 x D, replicated over the batch)
      relevance_c_ally = reshape(opts.pd_model, 1,1,[]) ;
      relevance_c_ally = repmat(relevance_c_ally, 1,1,1,size(c,4)) ;
      % NOTE(review): the forward pass uses (1 + 2*relevance) while this
      % uses (1 + 50*relevance) over all classes -- the two are not the
      % exact gradient pair; confirm this mismatch is intentional
      y = y .* (1 + 50.*relevance_c_ally) ;
      y = bsxfun(@times, dzdy, y) ;
    case 'mhinge'
      y = zerosLike(x) ;
      y(ci) = - dzdy .* (x(ci) < 1) ;
    case 'mshinge'
      Q = x ;
      Q(ci) = -inf ;
      [~, q] = max(Q,[],3) ;
      qi = offset + numPixelsPerImage * (q - 1) ;
      W = dzdy .* (x(ci) - x(qi) < 1) ;
      y = zerosLike(x) ;
      y(ci) = - W ;
      y(qi) = + W ;
    case 'mshinge_relevance'
      Q = x ;
      Q(ci) = -inf ;
      [~, q] = max(Q,[],3) ;
      qi = offset + numPixelsPerImage * (q - 1) ;
      relevance_c = reshape(opts.pd_model(c), 1,1,1,[]) ;
      W = dzdy .* (1 + 2 .* relevance_c) .* (x(ci) - x(qi) < 1) ;
      y = zerosLike(x) ;
      y(ci) = - W ;
      y(qi) = + W ;
    case 'mshingesquared'
      Q = x ;
      Q(ci) = -inf ;
      [~, q] = max(Q,[],3) ;
      qi = offset + numPixelsPerImage * (q - 1) ;
      t_ci = max(0, 1 - x(ci) + max(Q,[],3)) ;
      % NOTE(review): the exact derivative of m^2 carries a factor 2;
      % here it is omitted (presumably folded into the learning rate) --
      % confirm intended
      W = dzdy .* (x(ci) - x(qi) < 1) ;
      y = zerosLike(x) ;
      y(ci) = - W .* t_ci ;
      y(qi) = + W .* t_ci ;
    case 'mshingesquared_relevance'
      Q = x ;
      Q(ci) = -inf ;
      [~, q] = max(Q,[],3) ;
      qi = offset + numPixelsPerImage * (q - 1) ;
      t_ci = max(0, 1 - x(ci) + max(Q,[],3)) ;
      relevance_c = reshape(opts.pd_model(c), 1,1,1,[]) ;
      % NOTE(review): forward uses (1 + 2*relevance), backward uses
      % (1 + 50*relevance) -- confirm the mismatch is intentional
      W = dzdy .* (1 + 50 .* relevance_c) .* (x(ci) - x(qi) < 1) ;
      y = zerosLike(x) ;
      y(ci) = - W .* t_ci ;
      y(qi) = + W .* t_ci ;
    case 'mshingecubed'
      Q = x ;
      Q(ci) = -inf ;
      [~, q] = max(Q,[],3) ;
      qi = offset + numPixelsPerImage * (q - 1) ;
      % squared margin (the factor 3 of d(m^3)/dm is omitted)
      t_ci = max(0, (1 - x(ci) + max(Q,[],3)).^2) ;
      W = dzdy .* (x(ci) - x(qi) < 1) ;
      y = zerosLike(x) ;
      y(ci) = - W .* t_ci ;
      y(qi) = + W .* t_ci ;
    case 'mshingecubed_relevance'
      Q = x ;
      Q(ci) = -inf ;
      [~, q] = max(Q,[],3) ;
      qi = offset + numPixelsPerImage * (q - 1) ;
      t_ci = max(0, (1 - x(ci) + max(Q,[],3)).^2) ;
      relevance_c = reshape(opts.pd_model(c), 1,1,1,[]) ;
      % NOTE(review): forward uses (1 + 50*relevance), backward uses
      % (1 + 2*relevance) -- confirm the mismatch is intentional
      W = dzdy .* (1 + 2 .* relevance_c) .* (x(ci) - x(qi) < 1) ;
      y = zerosLike(x) ;
      y(ci) = - W .* t_ci ;
      y(qi) = + W .* t_ci ;
    case 'euclidean'
      % residual against the one-hot target (factor 2 omitted)
      x(ci) = x(ci) - 1 ;
      y = bsxfun(@times, dzdy, x) ;
    case 'euclidean_relevance_prod'
      relevance_c = reshape(opts.pd_model(c), 1,1,1,[]) ;
      x(ci) = x(ci) - 1 ;
      % weight 1/(1-relevance) follows Alejo et al. (2007)
      y = bsxfun(@times, dzdy, 1./(1-relevance_c) .* (x)) ;
    case 'euclidean_relevance_prodsup1'
      x(ci) = x(ci) - 1 ;
      relevance_c = reshape(opts.pd_model(c), 1,1,1,[]) ;
      % NOTE(review): forward uses (1 + 4*relevance), backward uses
      % (1 + 2*relevance) -- confirm the mismatch is intentional
      y = bsxfun(@times, dzdy, (1+2.*relevance_c) .* (x)) ;
    case 'euclidean_relevance_sum'
      relevance_c = reshape(opts.pd_model(c), 1,1,1,[]) ;
      x(ci) = x(ci) - 1 ;
      % clip extreme target-gradients of low-relevance classes
      yy = x(ci) ;
      yy(yy > 0 & relevance_c <= 0.3) = 0 ;
      yy(yy < -1 & relevance_c <= 0.3) = -1 ;
      x(ci) = yy ;
      % FIX: a further outlier-based clipping pass existed here but
      % always crashed -- std(p,3) is an invalid call (3 is read as the
      % weight flag, which must be 0 or 1; the dimension belongs in the
      % third argument, std(p,0,3)) -- and its result was never written
      % back into X, so it has been removed as dead code.
      y = bsxfun(@times, dzdy, x) ;
    case 'binaryerror'
      y = zerosLike(x) ;
    case 'binarylog'
      y = - dzdy ./ (x + (c-1)*0.5) ;
    case 'logistic'
      % d/dx log(1 + exp(-c.*x)) = -c ./ (1 + exp(c.*x))
      y = - dzdy .* c ./ (1 + exp(c.*x)) ;
    case 'hinge'
      y = - dzdy .* c .* (c.*x < 1) ;
  end

  if opts.normalise && numel(size(x)) == 4
    y = y ./ size(x,4) ;
  end

end

% --------------------------------------------------------------------
function out = zerosLike(proto)
% --------------------------------------------------------------------
% Return an all-zero array with the same size and numeric class as
% PROTO, staying on the GPU when PROTO is a gpuArray.
if ~isa(proto, 'gpuArray')
  out = zeros(size(proto), 'like', proto) ;
else
  out = gpuArray.zeros(size(proto), classUnderlying(proto)) ;
end

% --------------------------------------------------------------------
function out = onesLike(proto, sz)
% --------------------------------------------------------------------
% Return an all-one array of size SZ with the same numeric class as
% PROTO, staying on the GPU when PROTO is a gpuArray.
if ~isa(proto, 'gpuArray')
  out = ones(sz, 'like', proto) ;
else
  out = gpuArray.ones(sz, classUnderlying(proto)) ;
end