function net = InitNet(  )
%INITNET Build a stage-wise complex-valued reconstruction network in
%   MatConvNet "simplenn" format.
%
%   NET = INITNET() returns a struct whose NET.LAYERS cell array chains
%   custom layer types ('Reorg', 'Remid', 'Refinal', 'c_conv',
%   'Non_linear', 'MIN', 'ADD', 'Multi_org'/'Multi_mid'/'Multi_final',
%   'rLoss') that are implemented elsewhere in this project.  The number
%   of middle stages is nnconfig.StageNum - 2, so the overall depth is
%   controlled entirely by the `config` script run below.
%
%   NOTE(review): rng(0) is seeded once and every randn() call below
%   draws sequentially from that stream, so the ORDER of the weight
%   initializations is part of the reproducible behavior — do not
%   reorder them.

%--------------------Hyper-Parameters---------------------
config;                          % script: populates `nnconfig` in this workspace
N_s = nnconfig.StageNum;         % total number of unrolled stages
WD = nnconfig.WeightDecay; 
w_f = nnconfig.w_f;              % conv filter width (assumed odd so pd is integer)
d = nnconfig.L;                  % number of feature channels in the conv layers
pd = (w_f-1)/2;                  % "same" padding for a w_f x w_f filter
layerNum_middle = N_s - 2;       % middle stages (first and last are special-cased)
opts.batchNormalization = 0 ;    % bnorm insertion disabled (see hedged note below)
opts.networkType = 'simplenn' ;
rng('default');
rng(0);                          % fixed seed for reproducible weight init
% BS = nnconfig.BatchSize;
% EN = nnconfig.EpochNumber;
% LR = nnconfig.LearningRate;
% SW = nnconfig.imagesize;


%-----------------Parameters initialization-------------------
rho = 0.1;                       % initial weight of the reconstruction layers
eta = 1;                         % initial weight of the Multi_* layers
% He-style init scale, sqrt(2 / fan_in), for both conv layers
f1=sqrt(2 / (w_f *  w_f * d));
f3=sqrt(2 / (w_f *  w_f * d));
%line weight
% Per-channel nonlinearity weights: each of the d columns is the same
% crelu() response to the complex-valued LinearLabel grid.
% NOTE(review): the matrix is preallocated as real 'double' but crelu of
% a complex input presumably returns complex values — confirm crelu's
% return type; if complex, the zeros() preallocation is widened on the
% first assignment.
linew = zeros(length(nnconfig.LinearLabel) , d , 'double');
 for i=1:d
     linew (: , i) = crelu (complex(nnconfig.LinearLabel,nnconfig.LinearLabel));
 end
 
 
%---------------------Network structure---------------------
% MatConvNet cell-wrapping reminder: in struct(), a field value wrapped
% in { } is unwrapped to its contents (so 'weights', {{rho}} stores the
% 1x1 cell {rho}), while a bare scalar is stored as-is.  For scalar WD,
% 'weightDecay', {WD} and 'weightDecay', WD therefore store the same
% value; the mixed usage below is cosmetic, not functional.

net.layers = {} ;

%Reconstruction layer_orignal
% Stage 1: data-fidelity / reconstruction step with learnable step size rho.
net.layers{end+1} = struct('type', 'Reorg', ...
                           'weights', {{rho}},...
                           'learningRate', ones(1, 1, 'double'), ...
                           'weightDecay', {WD}, ...
                           'momentum', {{0}}) ;
%c1
% Forward transform: 1 -> d channels, w_f x w_f complex conv, "same" padding.
net.layers{end+1} = struct('type', 'c_conv', ...
                           'weights', {{f1*randn(w_f,w_f,1,d, 'double'), zeros(1, d, 'double')}}, ...
                           'stride', 1, ...
                           'pad', pd,...
                           'learningRate', ones(1, 2, 'double'), ...
                           'weightDecay', WD, ...
                           'momentum', {{zeros(w_f, w_f, 1, d, 'double'), zeros(1, d, 'double')}}  ) ;                                             
%net.layers{end+1} = struct('type', 'relu') ;
% Learnable pointwise nonlinearity parameterized by linew (replaces ReLU).
net.layers{end+1} = struct('type', 'Non_linear',...              
                           'weights',{{linew}},...
                           'learningRate', ones(1, 1, 'double'), ...
                           'weightDecay', WD, ...
                           'momentum', {{0}});
%c2
% Inverse transform: d -> 1 channel, mirroring c1's geometry.
net.layers{end+1} = struct('type', 'c_conv', ...
                           'weights', {{f3*randn(w_f,w_f,d,1, 'double'),  zeros(1,1,'double')}}, ...
                           'stride', 1, ...
                           'pad', pd,...
                           'learningRate', ones(1, 2, 'double'), ...
                           'weightDecay', WD, ...
                           'momentum', {{zeros(w_f, w_f, d, 1, 'double'), zeros(1, 1, 'double')}}  ) ;                       
net.layers{end+1} = struct('type', 'MIN') ;
% Stage-1 multiplier update with learnable scale eta.
net.layers{end+1} = struct('type', 'Multi_org',...               
                           'weights',{{eta}},...
                           'learningRate', ones(1, 1, 'double'), ...
                           'weightDecay', {WD}, ...
                           'momentum', {{0}}); 
                       
                       
% Middle stages: same c1 -> Non_linear -> c2 -> MIN pipeline as stage 1,
% preceded by a Remid reconstruction step and an ADD skip-combination,
% followed by a Multi_mid multiplier update.  Each iteration draws FRESH
% random conv weights (not shared across stages).
for k=1:layerNum_middle
%Reconstruction layer_middle  
net.layers{end+1} = struct('type', 'Remid', ...
                           'weights', {{rho}},...
                           'learningRate', ones(1, 1, 'double'), ...
                           'weightDecay', {WD}, ...
                           'momentum', {{0}}) ;                       
net.layers{end+1} = struct('type', 'ADD') ; 
%c1
net.layers{end+1} = struct('type', 'c_conv', ...
                           'weights', {{f1*randn(w_f,w_f,1,d, 'double'), zeros(1, d, 'double')}}, ...
                           'stride', 1, ...
                           'pad', pd,...
                           'learningRate', ones(1, 2, 'double'), ...
                           'weightDecay', WD, ...
                           'momentum', {{zeros(w_f, w_f, 1, d, 'double'), zeros(1, d, 'double')}}) ;
% net.layers{end+1} = struct('type', 'relu') ;
net.layers{end+1} = struct('type', 'Non_linear',...              
                           'weights',{{linew}},...
                           'learningRate', ones(1, 1, 'double'), ...
                           'weightDecay', WD, ...
                           'momentum', {{0}});
%c2
net.layers{end+1} = struct('type', 'c_conv', ...
                           'weights', {{f3*randn(w_f,w_f,d,1, 'double'),  zeros(1,1,'double')}}, ...
                           'stride', 1, ...
                           'pad', pd,...
                           'learningRate', ones(1, 2, 'double'), ...
                           'weightDecay', WD, ...
                           'momentum', {{zeros(w_f, w_f, d,1, 'double'), zeros(1, 1, 'double')}}) ;        
net.layers{end+1} = struct('type', 'MIN') ;
% NOTE(review): middle/final Multi_* and Re* layers use weightDecay {0}
% while the first stage uses {WD} — possibly intentional (only decay the
% first stage's step sizes), but worth confirming against the training code.
net.layers{end+1} = struct('type', 'Multi_mid',...               
                           'weights',{{eta}},...
                           'learningRate', ones(1, 1, 'double'), ...
                           'weightDecay', {0}, ...
                           'momentum', {{0}}); 
end
 

% Last stage: identical pipeline, terminated by Multi_final instead of
% Multi_mid so the final layer implementation can differ.
net.layers{end+1} = struct('type', 'Remid', ...
                           'weights', {{rho}},...
                           'learningRate', ones(1, 1, 'double'), ...
                           'weightDecay', {0}, ...
                           'momentum', {{0}}) ;                        
net.layers{end+1} = struct('type', 'ADD') ; 
%c1
net.layers{end+1} = struct('type', 'c_conv', ...
                           'weights', {{f1*randn(w_f,w_f,1,d, 'double'), zeros(1, d, 'double')}}, ...
                           'stride', 1, ...
                           'pad', pd,...
                           'learningRate', ones(1, 2, 'double'), ...
                           'weightDecay', WD, ...
                           'momentum', {{zeros(w_f, w_f, 1, d, 'double'), zeros(1, d, 'double')}}) ;
% net.layers{end+1} = struct('type', 'relu') ;
net.layers{end+1} = struct('type', 'Non_linear',...              
                           'weights',{{linew}},...
                           'learningRate', ones(1, 1, 'double'), ...
                           'weightDecay', WD, ...
                           'momentum', {{0}});
%c2
net.layers{end+1} = struct('type', 'c_conv', ...
                           'weights', {{f3*randn(w_f,w_f,d,1, 'double'),  zeros(1,1,'double')}}, ...
                           'stride', 1, ...
                           'pad', pd,...
                           'learningRate', ones(1, 2, 'double'), ...
                           'weightDecay', WD, ...
                           'momentum', {{zeros(w_f, w_f, d, 1, 'double'), zeros(1, 1, 'double')}}) ;
net.layers{end+1} = struct('type', 'MIN') ;
net.layers{end+1} = struct('type', 'Multi_final',...               
                           'weights',{{eta}},...
                           'learningRate', ones(1, 1, 'double'), ...
                           'weightDecay', {0}, ...
                           'momentum', {{0}}); 

                       
%Reconstruction layer_final
net.layers{end+1} = struct('type', 'Refinal', ...
                           'weights', { {rho}},...
                           'learningRate', ones(1, 1, 'double'), ...
                           'weightDecay', {0}, ...
                            'momentum', {{0}}) ;
                    
net.layers{end+1} = struct('type', 'rLoss') ;


% optionally switch to batch normalization
% NOTE(review): currently dead code (opts.batchNormalization = 0).  If
% ever enabled: insertBnorm grows net.layers by one on each call, so the
% fixed ascending indices 4,14,24 point at SHIFTED layers after the first
% insertion — iterate in descending order (24:-10:4) to keep the indices
% valid.  Confirm the intended target layers before enabling.
if opts.batchNormalization    
    for i= 4 : 10 : 24
  net = insertBnorm(net, i) ;   
    end
 
end

% Meta parameters (training options; original comment here was
% mojibake-encoded, likely "training hyper-parameters")
%net.meta.inputSize = [SW SW 1] ;
%net.meta.trainOpts.learningRate = LR ;
%net.meta.trainOpts.numEpochs = EN ;
%net.meta.trainOpts.batchSize = BS ;%%%%%

% Fill in default values (MatConvNet canonicalizes the layer structs)
net = vl_simplenn_tidy(net) ;

% Switch to DagNN if requested
switch lower(opts.networkType)
  case 'simplenn'
    % done
  case 'dagnn'
    net = dagnn.DagNN.fromSimpleNN(net, 'canonicalNames', true) ;
    net.addLayer('top1err', dagnn.Loss('loss', 'classerror'), ...
      {'prediction', 'label'}, 'error') ;
    net.addLayer('top5err', dagnn.Loss('loss', 'topkerror', ...
      'opts', {'topk', 5}), {'prediction', 'label'}, 'top5err') ;
  otherwise
    assert(false) ;
end

% --------------------------------------------------------------------
function net = insertBnorm(net, l)
% --------------------------------------------------------------------
% Splice a batch-normalization layer immediately AFTER layer L of NET
% (a simplenn struct), sized to layer L's output channel count, and
% zero out layer L's legacy 'biases' field (bnorm supplies the bias).
%
% NOTE(review): 'weights' holds 2 cells (gain, bias) while
% 'learningRate' has 3 entries and 'momentum' 3 cells — sized as if a
% third moments array were present.  Left as-is to preserve behavior;
% confirm against the project's bnorm implementation.
% --------------------------------------------------------------------
assert(isfield(net.layers{l}, 'weights'));

% Output channels of the preceding conv = 4th dim of its filter bank.
nCh = size(net.layers{l}.weights{1}, 4);

bnormLayer = struct('type', 'bnorm', ...
               'weights', {{ones(nCh, 1, 'double'), zeros(nCh, 1, 'double')}}, ...
               'learningRate', [1 1 0.05], ...
               'weightDecay', [0 0],...        
               'momentum', {{zeros(nCh, 1, 'double'), zeros(nCh, 1, 'double'),  zeros(nCh, 2, 'double' ) }}) ;

% The conv layer's bias is made redundant by bnorm's learned shift.
net.layers{l}.biases = [] ;

% Insert the new layer between positions l and l+1.
net.layers = [net.layers(1:l), {bnormLayer}, net.layers(l+1:end)] ;
