'''
This code is based on the tutorial from deeplearning.net

Added momentum updates based on Sutskever et al 2013

References:
                 
    - paper: "On the importance of momentum and initialization in deep learning"
             Sutskever, I., Martens, J., Dahl, G. and Hinton, G. E., ICML 2013
'''

import cPickle
import gzip
import os
import sys
import time
import numpy
import scipy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

from logistic_reg import LogisticRegression
from linear_reg import LinearRegression
from mlp import HiddenLayer
from logistic_da import LogisticDA
from linear_da import LinearDA
from theano import sparse as S
from nnutils import rec_linear
from svm import SVM

class NeuralNetwork(object):
    
    def __init__(self,n_ins,hidden_layers_sizes, n_outs,
                 pretrain_layer=None,corruption_levels=None,
                 activation=T.tanh, rng_seed=None,
                 n_iters_earlystop=10,momentum=0.9,
                 autoMomentum=True,
                 isDataInSparseMatrix=False,
                 L1_reg=0.00,L2_reg=0.0001, dropout_rate=None):
        """
        :type n_ins: int 
        :param n_ins: Number of inputs
        
        :type hidden_layer_sizes: list of ints
        :param hiden_layer_sizes: intermediate layers size, must contain
                               at least one value
                               
        :type n_outs: int
        :param n_outs: Number of output nodes
        
        :type pretrain_layer: None or 'logistic' or 'linear'
        :param pretrain_layer: If None, pretraining using auto encoders is not done,
                            If logistic, then logistic auto encoders is used, else if linear,
                            then linear auto encoders is used
                            
        :type corruption_levels: None or list of numbers (Equal to number of hidden layers)
        :param corruption_levels: Corruption levesl used in pretraining
        
        :type activation: e.g. T.tanh, T.sigmoid
        :param activation: Activation to be used for hidden layers
        
        :type numpy_rng: numpy.random.RandomState(...)  
        :param numpy_rng: numpy random number generator
        
        :type n_iters_earlystop: int
        :param n_iters_earlystop: If the validation error is increased this many times then stop
                    the algorithm
        
        :type momentum: float
        :param momentum: Fixed momentum value to be used in every iteration
        
        :type autoMomentum: True or False
        :param autoMomentum: Find momentum automatically if set to True
                        (If autoMomentum == False and momentum = 0, then momentum is not used)
                            
        :type L1_reg: float
        :param L1_reg: Coefficient for L1 regularization
        
        :type L2_reg: float
        :param L2_reg: Coefficient for L2 regularization
         
        :type isDataInSparseMatrix: True/False
        :param isDataInSparseMatrix: True if data is sparse and stored in scipy.csr_matrix
        
        """
        
        self.sigmoid_layers = []
        self.dA_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)
        self.n_outs = n_outs
        self.n_ins = n_ins
        self.pretrain_layer = pretrain_layer
        self.activation = activation
        self.corruption_levels = corruption_levels
        self.L1_reg = L1_reg
        self.L2_reg = L2_reg
        self.isDataInSparseMatrix = isDataInSparseMatrix
        self.n_iters_earlystop = n_iters_earlystop
        self.momentum = momentum
        self.autoMomentum = autoMomentum
        
        assert self.n_layers > 0
        
        
        
        # allocate symbolic variables for the data
        if isDataInSparseMatrix == True:
            self.x = S.csr_matrix('x')
        else:
            self.x = T.matrix('x')
        
        if n_outs > 1:
            self.y = T.ivector('y')
        else:
            self.y = T.vector('y')  
        
        if not rng_seed:
            rng_seed = numpy.random.randint(2**30)

        numpy_rng = numpy.random.RandomState(rng_seed)          
        theano_rng = T.shared_randomstreams.RandomStreams(rng_seed)
        
        
        for i in xrange(self.n_layers):
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]
                
            if i == 0:
                layer_input = self.x
                layer_input_dropout = self.x
                L1rec = None
                L2rec = None
            else:
                layer_input = self.sigmoid_layers[-1].output
                layer_input_dropout = self.sigmoid_layers[-1].output_dropout
                L1rec = self.sigmoid_layers[-1].L1rec
                L2rec = self.sigmoid_layers[-1].L2rec
            
            if dropout_rate != None:
                sigmoid_layer = HiddenLayer(numpy_rng=numpy_rng,
                                            theano_rng = theano_rng,
                                            input=layer_input,
                                            input_dropout=layer_input_dropout,
                                            n_in=input_size,
                                            n_out=hidden_layers_sizes[i],
                                            activation=activation,
                                            L1rec=L1rec,L2rec=L2rec,
                                            dropout_rate=dropout_rate[i])
            else:
                sigmoid_layer = HiddenLayer(numpy_rng=numpy_rng,
                                        theano_rng = theano_rng,
                                        input=layer_input,
                                        input_dropout=layer_input_dropout,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i],
                                        activation=activation,
                                        L1rec=L1rec,L2rec=L2rec)
                                            
            self.sigmoid_layers.append(sigmoid_layer)
            self.params.extend(sigmoid_layer.params)
            
            if not (pretrain_layer == None):
                if i == 0:
                    if pretrain_layer == 'logistic':
                        dA_layer = LogisticDA(
                                  n_inputs=input_size,
                                  n_hiddens=hidden_layers_sizes[i],
                                  input=layer_input,
                                  sampling = 1,
                                  noise = corruption_levels[i],
                                  W=sigmoid_layer.W,
                                  bhid=sigmoid_layer.b,
                                  activation=activation)
                    else:
                        dA_layer = LinearDA(
                                  input=layer_input,
                                  n_inputs=input_size,
                                  n_hiddens=hidden_layers_sizes[i],
                                  sampling = 1,
                                  noise = corruption_levels[i],
                                  W=sigmoid_layer.W,
                                  bhid=sigmoid_layer.b,
                                  activation=activation)
                else:
                    dA_layer = LogisticDA(n_inputs=input_size,
                                  n_hiddens=hidden_layers_sizes[i],
                                  input=layer_input,
                                  W=sigmoid_layer.W,
                                  bhid=sigmoid_layer.b,
                                  activation=activation)
                
                self.dA_layers.append(dA_layer)
                
    
    
        #Add last layer
        if(n_outs == 1):
            self.outLayer = LinearRegression(
                         input=self.sigmoid_layers[-1].output,
                             input_dropout=self.sigmoid_layers[-1].output_dropout,
                             n_in=hidden_layers_sizes[-1],
                             numpy_rng = numpy_rng, 
                             theano_rng = theano_rng,
                             L1rec=self.sigmoid_layers[-1].L1rec,
                             L2rec=self.sigmoid_layers[-1].L2rec,
                             dropout_rate=dropout_rate[-1])
        else:
            if dropout_rate != None:
                self.outLayer = LogisticRegression(
                             input=self.sigmoid_layers[-1].output,
                             input_dropout=self.sigmoid_layers[-1].output_dropout,
                             n_in=hidden_layers_sizes[-1], n_out=n_outs,
                             numpy_rng = numpy_rng, 
                             theano_rng = theano_rng,
                             L1rec=self.sigmoid_layers[-1].L1rec,
                             L2rec=self.sigmoid_layers[-1].L2rec,
                             dropout_rate=dropout_rate[-1])
                             
#                self.outLayer = SVM(
#                             input=self.sigmoid_layers[-1].output,
#                             input_dropout=self.sigmoid_layers[-1].output_dropout,
#                             n_in=hidden_layers_sizes[-1], n_out=n_outs,
#                             numpy_rng = numpy_rng, 
#                             theano_rng = theano_rng,
#                             dropout_rate=dropout_rate[-1],
#                             L1rec=self.sigmoid_layers[-1].L1rec,
#                             L2rec=self.sigmoid_layers[-1].L2rec)
            else:
                self.outLayer = LogisticRegression(
                         input=self.sigmoid_layers[-1].output,
                         input_dropout=self.sigmoid_layers[-1].output_dropout,
                         n_in=hidden_layers_sizes[-1], n_out=n_outs,
                         numpy_rng = numpy_rng, 
                         theano_rng = theano_rng,
                         L1rec=self.sigmoid_layers[-1].L1rec,
                         L2rec=self.sigmoid_layers[-1].L2rec)
#                self.outLayer = SVM(
#                             input=self.sigmoid_layers[-1].output,
#                             input_dropout=self.sigmoid_layers[-1].output_dropout,
#                             n_in=hidden_layers_sizes[-1], n_out=n_outs,
#                             numpy_rng = numpy_rng, 
#                             theano_rng = theano_rng,
#                             L1rec=self.sigmoid_layers[-1].L1rec,
#                             L2rec=self.sigmoid_layers[-1].L2rec)
            
        self.params.extend(self.outLayer.params)
        
        #Create memory for storing best parameters
        self.best_params = []
        for param in self.params:
            paramShape = param.get_value(borrow=True).shape
            best = theano.shared(value=numpy.zeros(paramShape,dtype=theano.config.floatX), borrow=True)
            self.best_params.append(best)
            
        #Create momentum params and temp params
        self.mu = theano.shared(numpy.cast[theano.config.floatX](0))
        self.learning_rate = T.scalar(dtype=theano.config.floatX)
        self.mom_params = []
        self.temp_params = []
        for param in self.params:
            paramShape = param.get_value(borrow=True).shape
            mom = theano.shared(value=numpy.zeros(paramShape,dtype=theano.config.floatX), borrow=True)
            temp_param = theano.shared(value=numpy.zeros(paramShape,dtype=theano.config.floatX), borrow=True)
            self.mom_params.append(mom)
            self.temp_params.append(temp_param)
                
        self.__finetune_cost = self.outLayer.cost(self.y,L1_reg,L2_reg)
#        self.__finetune_cost = self.outLayer.cost(self.y,C=0.1, 
#                                                  L1_reg = L1_reg, L2_reg = L2_reg)
        self.__errors = self.outLayer.errors(self.y)
        self.__predict = theano.function([self.x],self.outLayer.y_pred)
#        self.__probs = theano.function([self.x],self.outLayer.p_y_given_x)
    
    def __fit_batch(self,batch_index,mu,learning_rate, isClassicalMomentum):
        
        if not isClassicalMomentum:
            #Copy params and update it with mom+param
            for param,temp,mom in zip(self.params,self.temp_params,self.mom_params):
                temp.set_value(self.__inplaceAddVarible(temp.get_value(borrow=True), param.get_value(borrow=True),0,1),borrow=True)
                param.set_value(self.__inplaceAddVarible(param.get_value(borrow=True),mom.get_value(borrow=True), 1, self.mu.get_value()),borrow=True)
                    
            #calculate cost and update momentum params (theano function)
            self.__train_fn(batch_index,mu,learning_rate)
            
            #copy values stored in temp to params
            for param,temp in zip(self.params,self.temp_params):
                param.set_value(self.__inplaceAddVarible(param.get_value(borrow=True), temp.get_value(borrow=True), 0, 1))
        else:
            self.__train_fn(batch_index,mu,learning_rate)
            
        #Update parameters (theano function)
        self.__updateParams()
        
    def __inplaceAddVarible(self,dst,src,dstConst,srcConst):
        # Will perform dst = dst*dstConst + src*srcConst
        dst *= dstConst
        dst += src*srcConst
        return dst
        
    def __copyParamsToBest(self):
        for param,best in zip(self.params,self.best_params):
            best.set_value(self.__inplaceAddVarible(best.get_value(borrow=True),param.get_value(borrow=True),0,1),borrow=True)
#             best.set_value(best.get_value(borrow=True)*0+param.get_value(borrow=True),borrow=True)
#             best.set_value(param.get_value(borrow=True),borrow=True)
    
    def __copyBestToParams(self):
        for param,best in zip(self.params,self.best_params):
            param.set_value(self.__inplaceAddVarible(param.get_value(borrow=True),best.get_value(borrow=True),0,1),borrow=True)
#             param.set_value(param.get_value(borrow=True)*0+best.get_value(borrow=True),borrow=True)
            
    def __build_pretrain_functions(self,train_set_x,valid_set_x,batch_size,learning_rate):
        
        index = T.lscalar('index')  # index to a minibatch
        # number of batches
        n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
        n_batches_valid = valid_set_x.get_value(borrow=True).shape[0] / batch_size
        # begining of a batch, given `index`
        batch_begin = index * batch_size
        # ending of a batch given `index`
        batch_end = batch_begin + batch_size

        pretrain_fns_train = []
        pretrain_fns_valid = []
        for dA in self.dA_layers:
            # get the cost and the updates list
            cost, updates = dA.get_cost_updates(learning_rate)
            
            # compile the theano function
            fn_train = theano.function(inputs=[index],
                                 outputs=cost,
                                 updates=updates,
                                 givens={self.x: train_set_x[index*batch_size:
                                                            (index+1)*batch_size]})
            
#             valid_score_i = theano.function([index],
#                             dA.loss(valid_set_x[index*batch_size:(index+1)*batch_size]))
# 
#             def valid_score():
#                 return [valid_score_i(i) for i in xrange(n_batches_valid)]
                        
            # append `fn` to the list of functions
            pretrain_fns_train.append(fn_train)
#             pretrain_fns_valid.append(valid_score)

#         return pretrain_fns_train,pretrain_fns_valid
        return pretrain_fns_train

    
    def __build_finetune_functions(self,train_set_x,train_set_y,
                                   valid_set_x,valid_set_y,
                                   batch_size, L, isClassicalMomentum):
        
        n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
        n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size

#         n_valid_batches = valid_set_x.shape[0] / batch_size
#         n_train_batches = train_set_x.shape[0] / batch_size
        
        index = T.lscalar('index')  # index to a [mini]batch
        
        #create new value of params
#         nParams = len(self.params)
#         for i in xrange(nParams):
#             self.temp_params[i].set_value(self.params[i].get_value(borrow=True),borrow=True)
#             self.params[i].set_value(self.params[i].get_value(borrow=True)+self.mom_params[i].get_value(borrow=True)*self.mu.get_value(),borrow=True)

       
        
        # compute the gradients with respect to the model parameters
        gparams = T.grad(self.__finetune_cost, self.params)

        # compute list of fine-tuning updates
        momUpdates = []
        for mom_param, gparam in zip(self.mom_params, gparams):
            if isClassicalMomentum:
                momUpdates.append((mom_param, self.mu*mom_param-(1-self.mu)*self.learning_rate*gparam))
            else:
                momUpdates.append((mom_param, self.mu*mom_param-self.learning_rate*gparam))
            
        paramUpdates = []
        print 'L ', L
        for param,mom_param  in zip(self.params,self.mom_params):

            updated_param = param+mom_param            
            
            if param.get_value(borrow=True).ndim == 2:
                squared_norms = T.sum(updated_param**2, axis=1).reshape((updated_param.shape[0],1))
                scale = T.clip(T.sqrt(L / squared_norms), 0., 1.)
                updated_param *= scale
                paramUpdates.append((param,updated_param))
            else:
                paramUpdates.append((param,updated_param))
            
            
#         for param,temp_params,mom_param, gparam in zip(self.params,self.temp_params,self.mom_params, gparams):
#             param.set_value(temp_params.get_value(borrow=True),borrow=True)
#             updates.append((param, param + mom_param))
            
        
        
        given_mu = T.scalar(dtype=self.mu.dtype)
        print given_mu.dtype
        
        #create update function for parameteres
        self.__updateParams = theano.function([],updates=paramUpdates)
        
        #create trainining function
        self.__train_fn = theano.function(inputs=[index,given_mu,self.learning_rate],
              outputs=self.__finetune_cost,
              updates=momUpdates,
              givens={
                self.x: train_set_x[index * batch_size:
                                    (index + 1) * batch_size],
                self.y: train_set_y[index * batch_size:
                                    (index + 1) * batch_size],
                self.mu: given_mu},
              name='train',allow_input_downcast=True,on_unused_input='warn')

        
        valid_score_i = theano.function([index], self.__errors,
              givens={
                 self.x: valid_set_x[index * batch_size:
                                     (index + 1) * batch_size],
                 self.y: valid_set_y[index * batch_size:
                                     (index + 1) * batch_size]},
                      name='valid')

        train_score_i = theano.function([index], self.__errors,
              givens={
                 self.x: train_set_x[index * batch_size:
                                     (index + 1) * batch_size],
                 self.y: train_set_y[index * batch_size:
                                     (index + 1) * batch_size]},
                      name='train_score')
        
        # Create a function that scans the entire validation set
        def valid_score():
            return [valid_score_i(i) for i in xrange(n_valid_batches)]
        
        def train_score():
            return [train_score_i(i) for i in xrange(n_train_batches)]

        return valid_score, train_score
        
    def fit(self,X,Y,valid_X=None,valid_Y=None,
            validation_frequency=None, shuffleData=False,
            valid_ratio = 0.1,
            pretrain_lr=0.01,pretraining_epochs=15,
            finetune_lr=0.01, training_epochs=1000,
            batch_size=100, learning_rate_anneal_ratio=1.0,
            L = None, isClassicalMomentum=False):
        
        #some constants
        mu_max = 0.999
        
        #Shuffle and split data into train and validation sets
        if(valid_X == None or valid_Y == None):
            trainPercent = 1-valid_ratio
            n_samples = X.shape[0]
            if shuffleData == True:
                randInd = numpy.random.permutation(n_samples)
            else:
                randInd = range(n_samples)
            nTrain = int(n_samples*trainPercent)  
            train_X = X[randInd[0:nTrain]]; train_Y = Y[randInd[0:nTrain]]
            valid_X = X[randInd[nTrain:]]; valid_Y = Y[randInd[nTrain:]]
        else:
            n_samples_train = X.shape[0]
            if shuffleData == True:
                randInd = numpy.random.permutation(n_samples_train)
            else:
                randInd = range(n_samples_train)
            train_X = X[randInd]; train_Y = Y[randInd]
            
        
        # Load data into GPU if using gpu
#         train_set_x = train_X; train_set_y = train_Y
#         valid_set_x = valid_X; valid_set_y = valid_Y

        train_set_x = theano.shared(numpy.asarray(train_X,dtype=theano.config.floatX),
                                 borrow=True)
        train_set_y = theano.shared(numpy.asarray(train_Y,dtype=theano.config.floatX),
                                 borrow=True)
        valid_set_x = theano.shared(numpy.asarray(valid_X,dtype=theano.config.floatX),
                                 borrow=True)
        valid_set_y = theano.shared(numpy.asarray(valid_Y,dtype=theano.config.floatX),
                                 borrow=True)

#         train_set_x = theano.shared(train_X,
#                                  borrow=True)
#         train_set_y = theano.shared(numpy.asarray(train_Y,dtype=theano.config.floatX),
#                                  borrow=True)
#         valid_set_x = theano.shared(valid_X,
#                                  borrow=True)
#         valid_set_y = theano.shared(numpy.asarray(valid_Y,dtype=theano.config.floatX),
#                                  borrow=True)

        
        if self.n_outs > 1:
            train_set_y = T.cast(train_set_y, 'int32')
            valid_set_y = T.cast(valid_set_y, 'int32')
            
        n_train_batches = train_set_x.get_value(borrow=True).shape[0]/batch_size
        n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]/batch_size
        
#         n_train_batches = train_set_x.shape[0]/batch_size
#         n_valid_batches = valid_set_x.shape[0]/batch_size
        
        if(validation_frequency == None):
            validation_frequency = n_train_batches
            
        #Pretraining
        if self.pretrain_layer != None:
            print '... pre-training the model'
            start_time = time.clock()
#             pretrain_fns,pretrain_valid_score_fns = self.__build_pretrain_functions(train_set_x=train_set_x,
#                                                          valid_set_x=valid_set_x,
#                                                     batch_size=batch_size,learning_rate=pretrain_lr)
            pretrain_fns = self.__build_pretrain_functions(train_set_x=train_set_x,
                                                         valid_set_x=valid_set_x,
                                                    batch_size=batch_size,learning_rate=pretrain_lr)
            
            
            for i in xrange(self.n_layers):
                self.__train_layer(pretrain_fns[i], None, n_train_batches, n_valid_batches, pretraining_epochs)
                
            end_time = time.clock()
            print >> sys.stderr, ('The pretraining code ran for %.2fm' % ((end_time - start_time) / 60.))
        
        # Fine tuning
        print '... fine-tuning the model'
        start_time = time.clock()
        finetune_validate_model,train_score = self.__build_finetune_functions(train_set_x,train_set_y,
                                                                   valid_set_x,valid_set_y, 
                                                                   batch_size=batch_size,
                                                                   L = L, 
                                                                   isClassicalMomentum=isClassicalMomentum)
        
        self.__train_layer(finetune_validate_model,train_score, 
                           n_train_batches, n_valid_batches, training_epochs,
                           learning_rate=finetune_lr,
                           learning_rate_anneal_ratio=learning_rate_anneal_ratio,
                           mu_max=mu_max,validation_frequency=validation_frequency,
                           isClassicalMomentum=isClassicalMomentum)
        
        end_time = time.clock()
        print >> sys.stderr, ('The finetuning code ran for %.2fm' % ((end_time - start_time) / 60.))
            
            
    def __train_layer(self,valid_score_fn,train_score_fn,
                      n_train_batches,n_valid_batches,
                      n_epochs,learning_rate,
                      learning_rate_anneal_ratio,
                      mu_max,validation_frequency,
                      isClassicalMomentum):
        
        improvement_threshold = 0.995
        best_validation_loss = numpy.inf
        done_looping = False
        epoch = 0
        num_updates = 0
        check_valid_iter = 0
        errors = numpy.zeros((n_epochs*n_train_batches/validation_frequency+1,2), dtype='float32')
        error_idx = 0
        while (epoch < n_epochs) and (not done_looping):
            
#            if epoch < 500:
#                mu = 0.5*(1. - epoch/500.) + 0.99*(epoch/500.)
#            else:
#                mu = 0.99
            
            sumW = numpy.sum(numpy.sum(self.params[0].get_value(borrow=True)**2, axis=1))
            print 'sumW', sumW
#            print 'epoch = %d out of %d, learning rate: %.4f, momentum: %.4f' \
#            %(epoch,n_epochs, learning_rate, mu)
            epoch = epoch + 1
            for minibatch_index in xrange(n_train_batches):
                num_updates += 1
                
                #calculate momentum automatically
                if self.autoMomentum == True:
                    new_mu = 1-2**(-1-numpy.log2(num_updates/250+1))
                    if new_mu > mu_max:
                        mu = mu_max
                    else:
                        mu = new_mu
                    
                else:
                    mu = self.momentum
                
                self.__fit_batch(minibatch_index, mu, learning_rate, isClassicalMomentum)
#                 train_fn(minibatch_index,mu,learning_rate)
                                  
                # iteration number
                if valid_score_fn != None:
                    if (num_updates) % validation_frequency == 0:
                        # compute zero-one loss on validation set
                        validation_losses = valid_score_fn()
                        this_validation_loss = numpy.mean(validation_losses)
                        errors[error_idx, 1] = this_validation_loss
                        print('epoch %i, minibatch %i/%i, validation error %f %% best=%f num_updates=%i' % \
                                (epoch, minibatch_index + 1, n_train_batches,
                                this_validation_loss * 100.,best_validation_loss*100.,num_updates))
                        
                        if train_score_fn != None:
                            train_error = numpy.mean(train_score_fn())
                            errors[error_idx, 0] = train_error                   
                            print "train error = %f"  % train_error
                    
                        error_idx += 1
                        # if we got the best validation score until now
                        if this_validation_loss < best_validation_loss:
                            #improve patience if loss improvement is good enough
                            if this_validation_loss < best_validation_loss *  \
                               improvement_threshold:
                                check_valid_iter = 0
                                       
                            best_validation_loss = this_validation_loss
                            
                            #Save best params
                            self.__copyParamsToBest()
                    
                        check_valid_iter = check_valid_iter + 1
                        
                        
        
        
                if check_valid_iter > self.n_iters_earlystop:
                    done_looping = True
                    
            learning_rate *= learning_rate_anneal_ratio

        #Copy best parameters into self.params
        self.__copyBestToParams()
#         self.__saveToFile()
        
        
        #Final validation error to check if best params are copied
        validation_losses = valid_score_fn()
        final_valid_loss = numpy.mean(validation_losses)*100
        print 'final validation error = %f' %final_valid_loss
    
    def predict(self,X):
        return self.__predict(X)

    def probs(self,X):
        return self.__probs(X)
    
def test():
    f = gzip.open('../mnist.pkl.gz', 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    print train_set[0].shape,train_set[1].shape
    f.close()
    
    nn = NeuralNetwork(n_ins=784, hidden_layers_sizes=[1200,1200], n_outs=10, 
                       pretrain_layer=None, corruption_levels=None,
                       L1_reg=0.0,L2_reg=0.0001, activation=rec_linear,
                       momentum=0.0,autoMomentum=True,
                       n_iters_earlystop=100000,
                       dropout_rate = None)
    nn.fit(train_set[0],train_set[1],valid_set[0],valid_set[1],pretraining_epochs=2,
           training_epochs=3000,batch_size=200,
           finetune_lr = 0.001, learning_rate_anneal_ratio=1.0,L=15.0,
           isClassicalMomentum=False)
    predVals = nn.predict(test_set[0])
    print 'test error rate = %f' %numpy.mean(predVals != test_set[1])
# Script entry point: run the MNIST demo when executed directly.
if __name__ == '__main__':
    test()