'''
This code is based on the tutorial from deeplearning.org

Added momentum updates based on Sutskever et al 2013

References:
                 
    - paper: "On the importance of momentum and initialization in deep learning"
             Sutskever, I., Martens, J., Dahl, G. and Hinton, G. E., ICML 2013
'''

import cPickle
import gzip
import os
import sys
import time
import numpy
import scipy
import theano
import theano.tensor as T
import nnutils
import sklearn.linear_model as lm
import sklearn.preprocessing as pp

from theano.tensor.shared_randomstreams import RandomStreams
from logistic_reg import LogisticRegression
from linear_reg import LinearRegression
from mlp import HiddenLayer
from logistic_da import LogisticDA
from linear_da import LinearDA
from theano import sparse as S
from svm import SVM
import dae

from sklearn.datasets import load_diabetes

class NeuralNetwork(object):
    
    def __init_model(self, n_input,
                 hidden_layers_sizes,
                 network_type,
                 n_outs,
                 is_pretrain,
                 corruption_levels,
                 activations,
                 is_sparse_data,
                 sampling,
                 L1_reg,
                 L2_reg,
                 rng_seed,
                 dropout_rate):
        
        # initilization
        self._hidden_layers = []
        self._dA_layers = []
        self._params = []
        self._num_layers = len(hidden_layers_sizes)
        self._is_sparse_data = is_sparse_data
        self._sampling = sampling
        self._network_type=network_type
        
        assert network_type.lower() in {"regression", "classification", "unsupervised"}, \
        "network type must be regression, classification or unsupervised"
        
        if network_type.lower() == "unsupervised":
            self._num_outs = n_input
        else:
            assert n_outs != None, "For classification and regression mode, the number of "+ \
            "output needs to be specified"
            self._num_outs = n_outs
            
        assert len(activations) == self._num_layers + 1, "The number of activation functions " + \
        "need to match the number of hidden layers"
        
        self._num_input = n_input
        self._is_pretrain = is_pretrain
        self._activations = activations
        self._corruption_levels = corruption_levels
        self._L1_reg = L1_reg
        self._L2_reg = L2_reg
        self._is_sparse_data = is_sparse_data
        
        assert self._num_layers > 0, "There needs to be at least one layer for neural network"
        
        # allocate symbolic variables for the data
        if is_sparse_data == True:
            self._x = S.csr_matrix('x')
        else:
            self._x = T.matrix('x')
            
        if network_type.lower() == "classification":
            self._y = T.ivector('y')
        else:
            self._y = T.matrix('y')
            
        
    def __init_params(self, n_input,
                      n_outs,
                      hidden_layers_sizes,
                      dropout_rate,
                      activations,
                      rng_seed):
        # random seeds
        if not rng_seed:
            rng_seed = numpy.random.randint(2**30)

        numpy_rng = numpy.random.RandomState(rng_seed)          
        theano_rng = T.shared_randomstreams.RandomStreams(rng_seed)
        
        activation_dict = {"tanh":T.tanh, "sigmoid":T.nnet.sigmoid, 
                           "linear":None, "rectl":nnutils.rec_linear,
                           "softmax":T.nnet.softmax}
        
        for layer_idx in xrange(self._num_layers):
            if layer_idx == 0:
                input_size = n_input
            else:
                input_size = hidden_layers_sizes[layer_idx - 1]
                
            if layer_idx == 0:
                layer_input = self._x
                layer_input_dropout = self._x
                L1rec = None
                L2rec = None
            else:
                layer_input = self._hidden_layers[-1]._output
                layer_input_dropout = self._hidden_layers[-1]._output_dropout
                L1rec = self._hidden_layers[-1]._L1rec
                L2rec = self._hidden_layers[-1]._L2rec
            
            if dropout_rate != None:
                print "Not yet implemented"
                pass
            else: 
                assert activations[layer_idx] in activation_dict, "The activation function is not valid"
                h_layer = HiddenLayer(numpy_rng=numpy_rng,
                                        theano_rng = theano_rng,
                                        input=layer_input,
                                        input_dropout=layer_input_dropout,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[layer_idx],
                                        activation=activation_dict[activations[layer_idx]],
                                        L1rec=L1rec,L2rec=L2rec)
                
                #Link pretraining layer
                if self._is_pretrain:
                    if self._is_sparse_data:
                        if layer_idx == 0:
                            dA_layer = dae.DAE(n_inputs=input_size, 
                                       n_hiddens=hidden_layers_sizes[layer_idx],
                                       W=h_layer._W, 
                                       c=h_layer._b, 
                                       noise=self._corruption_levels[layer_idx], 
                                       sampling=self._sampling,
                                       rng = theano_rng)
                        else:
                            dA_layer = dae.DAE(n_inputs=input_size, 
                                       n_hiddens=hidden_layers_sizes[layer_idx],
                                       W=h_layer._W, 
                                       c=h_layer._b, 
                                       noise=self._corruption_levels[layer_idx], 
                                       sampling=1.0,
                                       rng = theano_rng)
                    else:
                        dA_layer = dae.DAE(n_inputs=input_size, 
                                       n_hiddens=hidden_layers_sizes[layer_idx],
                                       W=h_layer._W, 
                                       c=h_layer._b, 
                                       noise=self._corruption_levels[layer_idx], 
                                       sampling=1.0,
                                       rng = theano_rng)
                        
                    self._dA_layers.append(dA_layer)
                    
            self._hidden_layers.append(h_layer)
            self._params.extend(h_layer._params)
            
            
        #Add last layer
        if dropout_rate != None:
            print "Not yet implemented"
            pass
        else:
            self._out_layer = HiddenLayer(
                     input=self._hidden_layers[-1]._output,
                     input_dropout=self._hidden_layers[-1]._output_dropout,
                     n_in=hidden_layers_sizes[-1],
                     n_out=n_outs,
                     numpy_rng = numpy_rng, 
                     theano_rng = theano_rng,
                     activation=activation_dict[activations[-1]],
                     L1rec=self._hidden_layers[-1]._L1rec,
                     L2rec=self._hidden_layers[-1]._L2rec)

            
        self._params.extend(self._out_layer._params)
        
        #Create memory for storing best parameters
        self._best_params = []
        for param in self._params:
            paramShape = param.get_value(borrow=True).shape
            best = theano.shared(value=numpy.zeros(paramShape,dtype=theano.config.floatX), borrow=True)
            self._best_params.append(best)
        

    def __init__(self, n_input,
                 hidden_layers_sizes,
                 network_type,
                 n_outs = None,
                 is_pretrain=False,
                 corruption_levels=None,
                 activations=None,
                 training_cost="cross_entropy",
                 is_sparse_data=False,
                 sampling=0.01,
                 L1_reg=0.0,
                 L2_reg=0.0001,
                 rng_seed=None,
                 dropout_rate=None):
        
        
        self.__init_model(n_input, hidden_layers_sizes, network_type, n_outs, \
                 is_pretrain, corruption_levels, activations, is_sparse_data, \
                 sampling,L1_reg, L2_reg, rng_seed, dropout_rate)
        
        self.__init_params(n_input, self._num_outs, hidden_layers_sizes, dropout_rate, activations, rng_seed)
    
        
        
        # compile theano functions
        if training_cost == "cross_entropy":
            self.__finetune_cost = self._out_layer.cross_entropy_cost(self._y,L1_reg,L2_reg)
        elif training_cost == "mean_squared":
            self.__finetune_cost = self._out_layer.mean_squared_errors(self._y, L1_reg, L2_reg)
        else:
            assert False, "The type of cost is not supported"
        
        if network_type.lower() == "classification":
            self.__errors = self._out_layer.classification_errors(self._y)
            self.__predict = theano.function([self._x],self._out_layer._y_pred)
            self.__probs = theano.function([self._x],self._out_layer._output)
        else:
            self.__errors = self._out_layer.mean_squared_errors(self._y, 0, 0)
            self.__predict = theano.function([self._x],self._out_layer._output)
            self.__probs = theano.function([self._x],self._out_layer._output)

    
    def __fit_batch(self,batch_index,mu,learning_rate, isClassicalMomentum):
        
        if not isClassicalMomentum:
            #Copy params and update it with mom+param
            for param,temp,mom in zip(self._params,self._temp_params,self._mom_params):
                temp.set_value(self.__inplaceAddVarible(temp.get_value(borrow=True), param.get_value(borrow=True),0,1),borrow=True)
                param.set_value(self.__inplaceAddVarible(param.get_value(borrow=True),mom.get_value(borrow=True), 1, self._mu.get_value()),borrow=True)
                    
            #calculate cost and update momentum params (theano function)
            self.__train_fn(batch_index,mu,learning_rate)
            
            #copy values stored in temp to params
            for param,temp in zip(self._params,self._temp_params):
                param.set_value(self.__inplaceAddVarible(param.get_value(borrow=True), temp.get_value(borrow=True), 0, 1))
        else:
            self.__train_fn(batch_index,mu,learning_rate)
            
        #Update parameters (theano function)
        self.__updateParams()
        
    def __inplaceAddVarible(self,dst,src,dstConst,srcConst):
        # Will perform dst = dst*dstConst + src*srcConst
        dst *= dstConst
        dst += src*srcConst
        return dst
        
    def __copyParamsToBest(self):
        for param,best in zip(self._params,self._best_params):
            best.set_value(self.__inplaceAddVarible(best.get_value(borrow=True),param.get_value(borrow=True),0,1),borrow=True)
#             best.set_value(best.get_value(borrow=True)*0+param.get_value(borrow=True),borrow=True)
#             best.set_value(param.get_value(borrow=True),borrow=True)
    
    def __copyBestToParams(self):
        for param,best in zip(self._params,self._best_params):
            param.set_value(self.__inplaceAddVarible(param.get_value(borrow=True),best.get_value(borrow=True),0,1),borrow=True)
#             param.set_value(param.get_value(borrow=True)*0+best.get_value(borrow=True),borrow=True)
            

    def __pretrain(self,train_set_x,batch_size,learning_rate,n_epochs):
        
        #Build functions
        pretrain_fns = []
        for layer_idx in xrange(self._num_layers):
            dA_layer = self._dA_layers[layer_idx]
            updates_layer = {}
            gparams = T.grad(dA_layer.loss, dA_layer.params)
            for param, gparam in zip(dA_layer.params, gparams):
                updates_layer[param] = param - learning_rate * gparam
             
            fit = theano.function([dA_layer.input], dA_layer.loss,updates=updates_layer)
            pretrain_fns.append(fit)
            
        #pretrain
        n_train = train_set_x.get_value(borrow=True).shape[0]
        n_train_batches = n_train/batch_size
        if(n_train*batch_size < n_train):
            has_extra = True
        else:
            has_extra = False
            
        for layer_idx in xrange(self._num_layers):
            print 'Pretraining layer %d' %(layer_idx)
            if layer_idx == 0:
                layer_input = train_set_x.get_value(borrow=True)
            else:
                layer_input = self._dA_layers[layer_idx-1].encode(layer_input)
                
            for n in xrange(n_epochs):
                avg_train_err = 0
                for index in xrange(n_train_batches):
                    avg_train_err += pretrain_fns[layer_idx](layer_input[index * batch_size:(index + 1) * batch_size])
                if has_extra:
                    avg_train_err += pretrain_fns[layer_idx](layer_input[(index + 1) * batch_size:])
                    avg_train_err /= (n_train_batches+1)
                else:
                    avg_train_err /= (n_train_batches)
                print 'Epoch %d , Train error %f' %(n,avg_train_err)
                    
            
            
    def __build_finetune_functions(self,train_set_x,train_set_y,
                                   valid_set_x,valid_set_y,
                                   batch_size, L, isClassicalMomentum):
        
        ## FIX!!!
        n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
        n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size

#         n_valid_batches = valid_set_x.shape[0] / batch_size
#         n_train_batches = train_set_x.shape[0] / batch_size
        
        index = T.lscalar('index')  # index to a [mini]batch
        
        #create new value of params
#         nParams = len(self.params)
#         for i in xrange(nParams):
#             self.temp_params[i].set_value(self.params[i].get_value(borrow=True),borrow=True)
#             self.params[i].set_value(self.params[i].get_value(borrow=True)+self.mom_params[i].get_value(borrow=True)*self.mu.get_value(),borrow=True)

       
        
        # compute the gradients with respect to the model parameters
        gparams = T.grad(self.__finetune_cost, self._params)

        # compute list of fine-tuning updates
        momUpdates = []
        for mom_param, gparam in zip(self._mom_params, gparams):
            if isClassicalMomentum:
                momUpdates.append((mom_param, self._mu*mom_param-(1-self._mu)*self._learning_rate*gparam))
            else:
                momUpdates.append((mom_param, self._mu*mom_param-self._learning_rate*gparam))
            
        paramUpdates = []
        print 'L ', L
        for param,mom_param  in zip(self._params,self._mom_params):

            updated_param = param+mom_param            
            
            if param.get_value(borrow=True).ndim == 2:
                squared_norms = T.sum(updated_param**2, axis=1).reshape((updated_param.shape[0],1))
                scale = T.clip(T.sqrt(L / squared_norms), 0., 1.)
                updated_param *= scale
                paramUpdates.append((param,updated_param))
            else:
                paramUpdates.append((param,updated_param))
            
            
#         for param,temp_params,mom_param, gparam in zip(self.params,self.temp_params,self.mom_params, gparams):
#             param.set_value(temp_params.get_value(borrow=True),borrow=True)
#             updates.append((param, param + mom_param))
            
        
        
        given_mu = T.scalar(dtype=self._mu.dtype)
        print given_mu.dtype
        
        #create update function for parameteres
        self.__updateParams = theano.function([],updates=paramUpdates)
        
        #create trainining function
        self.__train_fn = theano.function(inputs=[index,given_mu,self._learning_rate],
              outputs=self.__finetune_cost,
              updates=momUpdates,
              givens={
                self._x: train_set_x[index * batch_size:
                                    (index + 1) * batch_size],
                self._y: train_set_y[index * batch_size:
                                    (index + 1) * batch_size],
                self._mu: given_mu},
              name='train',allow_input_downcast=True,on_unused_input='warn')

        
#         valid_score_i = theano.function([index], self.__errors,
#               givens={
#                  self._x: valid_set_x[index * batch_size:
#                                      (index + 1) * batch_size],
#                  self._y: valid_set_y[index * batch_size:
#                                      (index + 1) * batch_size]},
#                       name='valid')
# 
#         train_score_i = theano.function([index], self.__errors,
#               givens={
#                  self._x: train_set_x[index * batch_size:
#                                      (index + 1) * batch_size],
#                  self._y: train_set_y[index * batch_size:
#                                      (index + 1) * batch_size]},
#                       name='train_score')
        
        valid_score_i = theano.function([], self.__errors,
              givens={
                 self._x: valid_set_x,
                 self._y: valid_set_y},
                      name='valid')
 
        train_score_i = theano.function([], self.__errors,
              givens={
                 self._x: train_set_x,
                 self._y: train_set_y},
                      name='train_score')
        
        # Create a function that scans the entire validation set
#         def valid_score():
#             return [valid_score_i(i) for i in xrange(n_valid_batches)]
#         
#         def train_score():
#             return [train_score_i(i) for i in xrange(n_train_batches)]
        def valid_score():
            return valid_score_i()
        
        def train_score():
            return train_score_i()
        
        return valid_score, train_score
        
    def fit(self,X,Y,valid_X=None,valid_Y=None,
            validation_frequency=None, shuffleData=False,
            valid_ratio = 0.1,
            pretrain_lr=0.01,
            pretraining_epochs=15,
            finetune_lr=0.01, 
            training_epochs=1000,
            batch_size=100, 
            learning_rate_anneal_ratio=1.0,
            n_iters_earlystop=10,
            momentum=0.9,
            momentum_max = 0.999,
            auto_momentum=True,
            L = None, 
            isClassicalMomentum=False):
        
        self._n_iters_earlystop = n_iters_earlystop
        #Create momentum params and temp params
        self._mu = theano.shared(numpy.cast[theano.config.floatX](0))
        self._learning_rate = T.scalar(dtype=theano.config.floatX)
        self._mom_params = []
        self._temp_params = []
        self._auto_momentum = auto_momentum
        self._momentum = momentum
        for param in self._params:
            paramShape = param.get_value(borrow=True).shape
            mom = theano.shared(value=numpy.zeros(paramShape,dtype=theano.config.floatX), borrow=True)
            temp_param = theano.shared(value=numpy.zeros(paramShape,dtype=theano.config.floatX), borrow=True)
            self._mom_params.append(mom)
            self._temp_params.append(temp_param)
               
        
        #Shuffle and split data into train and validation sets
        if(valid_X == None or valid_Y == None):
            trainPercent = 1-valid_ratio
            n_samples = X.shape[0]
            if shuffleData == True:
                randInd = numpy.random.permutation(n_samples)
            else:
                randInd = range(n_samples)
            nTrain = int(n_samples*trainPercent)  
            train_X = X[randInd[0:nTrain]]; train_Y = Y[randInd[0:nTrain]]
            valid_X = X[randInd[nTrain:]]; valid_Y = Y[randInd[nTrain:]]
        else:
            n_samples_train = X.shape[0]
            if shuffleData == True:
                randInd = numpy.random.permutation(n_samples_train)
            else:
                randInd = range(n_samples_train)
            train_X = X[randInd]; train_Y = Y[randInd]
            
        
        # Load data into GPU if using gpu
        if not scipy.sparse.isspmatrix_csr(train_X):
            train_set_x = theano.shared(numpy.asarray(train_X,dtype=theano.config.floatX),
                                     borrow=True)
            train_set_y = theano.shared(numpy.asarray(train_Y,dtype=theano.config.floatX),
                                     borrow=True)
            valid_set_x = theano.shared(numpy.asarray(valid_X,dtype=theano.config.floatX),
                                     borrow=True)
            valid_set_y = theano.shared(numpy.asarray(valid_Y,dtype=theano.config.floatX),
                                     borrow=True)
        else:
            train_set_x = theano.shared(train_X,
                                     borrow=True)
            train_set_y = theano.shared(train_Y,
                                     borrow=True)
            valid_set_x = theano.shared(valid_X,
                                     borrow=True)
            valid_set_y = theano.shared(valid_Y,
                                     borrow=True)
            
        if self._network_type.lower() == "classification":
            train_set_y = T.cast(train_set_y, 'int32')
            valid_set_y = T.cast(valid_set_y, 'int32')
            
        n_train_batches = train_set_x.get_value(borrow=True).shape[0]/batch_size
        n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]/batch_size
        
        if(validation_frequency == None):
            validation_frequency = n_train_batches
        
        #Pretrain 
        if self._is_pretrain:
            print '... pre-training the model'
            start_time = time.clock()
            self.__pretrain(train_set_x, batch_size, pretrain_lr, pretraining_epochs)
            end_time = time.clock()
            print >> sys.stderr, ('The pretraining code ran for %.2fm' % ((end_time - start_time) / 60.))
        
        # Fine tuning
        print '... fine-tuning the model'
        start_time = time.clock()
        finetune_validate_model,train_score = self.__build_finetune_functions(train_set_x,train_set_y,
                                                                   valid_set_x,valid_set_y, 
                                                                   batch_size=batch_size,
                                                                   L = L, 
                                                                   isClassicalMomentum=isClassicalMomentum)
        
        self.__train_layer(finetune_validate_model,train_score, 
                           n_train_batches, n_valid_batches, training_epochs,
                           learning_rate=finetune_lr,
                           learning_rate_anneal_ratio=learning_rate_anneal_ratio,
                           mu_max=momentum_max,validation_frequency=validation_frequency,
                           isClassicalMomentum=isClassicalMomentum)
        
        end_time = time.clock()
        print >> sys.stderr, ('The finetuning code ran for %.2fm' % ((end_time - start_time) / 60.))
            
            
    def __train_layer(self,valid_score_fn,train_score_fn,
                      n_train_batches,n_valid_batches,
                      n_epochs,learning_rate,
                      learning_rate_anneal_ratio,
                      mu_max,validation_frequency,
                      isClassicalMomentum):
        
        improvement_threshold = 0.995
        best_validation_loss = numpy.inf
        done_looping = False
        epoch = 0
        num_updates = 0
        check_valid_iter = 0
        errors = numpy.zeros((n_epochs*n_train_batches/validation_frequency+1,2), dtype='float32')
        error_idx = 0
        while (epoch < n_epochs) and (not done_looping):
            
#            if epoch < 500:
#                mu = 0.5*(1. - epoch/500.) + 0.99*(epoch/500.)
#            else:
#                mu = 0.99
            epoch = epoch + 1
            for minibatch_index in xrange(n_train_batches):
                num_updates += 1
                
                #calculate momentum automatically
                if self._auto_momentum == True:
                    new_mu = 1-2**(-1-numpy.log2(num_updates/250+1))
                    if new_mu > mu_max:
                        mu = mu_max
                    else:
                        mu = new_mu
                    
                else:
                    mu = self._momentum
                
                self.__fit_batch(minibatch_index, mu, learning_rate, isClassicalMomentum)
                                  
                # iteration number
                if valid_score_fn != None:
                    if (num_updates) % validation_frequency == 0:
                        # compute zero-one loss on validation set
                        validation_losses = valid_score_fn()
                        this_validation_loss = numpy.mean(validation_losses)
                        errors[error_idx, 1] = this_validation_loss
                        print('epoch %i, minibatch %i/%i, validation error %f %% best=%f num_updates=%i' % \
                                (epoch, minibatch_index + 1, n_train_batches,
                                this_validation_loss * 100.,best_validation_loss*100.,num_updates))
                        
#                         if train_score_fn != None:
#                             train_error = numpy.mean(train_score_fn())
#                             errors[error_idx, 0] = train_error                   
#                             print "train error = %f"  % train_error
                    
                        error_idx += 1
                        # if we got the best validation score until now
                        if this_validation_loss < best_validation_loss:
                            #improve patience if loss improvement is good enough
                            if this_validation_loss < best_validation_loss *  \
                               improvement_threshold:
                                check_valid_iter = 0
                                       
                            best_validation_loss = this_validation_loss
                            
                            #Save best params
                            self.__copyParamsToBest()
                    
                        check_valid_iter = check_valid_iter + 1
                        
                        
        
        
                if check_valid_iter > self._n_iters_earlystop:
                    done_looping = True
                    
            learning_rate *= learning_rate_anneal_ratio

        #Copy best parameters into self.params
        self.__copyBestToParams()
        
        #Final validation error to check if best params are copied
        validation_losses = valid_score_fn()
        trian_losses = train_score_fn()
        final_valid_loss = numpy.mean(validation_losses)
        final_train_loss = numpy.mean(trian_losses)
        print 'final validation error = %f , final train error = %f' %(final_valid_loss,final_train_loss)
    
    def predict(self,X):
        return self.__predict(X)

    def probs(self,X):
        return self.__probs(X)
    
def test():
    f = gzip.open('../mnist.pkl.gz', 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    print train_set[0].shape,train_set[1].shape
    f.close()
#     
#     nn = NeuralNetwork(n_input=784, hidden_layers_sizes=[1000,500,250,30,250,500,1000], 
#                        network_type = "regression", training_cost = "mean_squared",
#                        L2_reg = 0.0,
#                        activations=["sigmoid", "linear"])
# 
#     nn.fit(train_set[0],train_set[0],valid_set[0],valid_set[0],pretraining_epochs=2,
#            training_epochs=3000,batch_size=200,
#            finetune_lr = 0.001, learning_rate_anneal_ratio=1.0,
#            L=15.0,
#            isClassicalMomentum=False)
#     predVals = nn.predict(test_set[0])
#     print 'test error rate = %f' %numpy.mean(predVals != test_set[1])

#     train_set = load_diabetes();
#     data = numpy.asarray(train_set['data'], dtype = 'float32')
#     target = numpy.zeros((data.shape[0], 1), dtype = 'float32')
#     target[:,0] = numpy.asarray(train_set['target'], dtype = 'float32')
    
    
#     print data.shape, target.shape, target.dtype, data.dtype
    
    data = train_set[0]
    target = train_set[1]
    
    #normalize data
#     data = pp.scale(data)
    
    nn = NeuralNetwork(n_input=data.shape[1], hidden_layers_sizes=[100,50],
                       n_outs = 1,
                       is_sparse_data=False,
                       is_pretrain=True,corruption_levels=[0.1,0.1],sampling=1.0,
                       network_type = "classification", training_cost = "cross_entropy",
                       L2_reg = 0.0001,
                       activations=["tanh","tanh", "softmax"])
 
    nn.fit(data,target,
           training_epochs=5000,batch_size=78,
           valid_ratio = 78.0/442.0, n_iters_earlystop = 10000,
           shuffleData = True,
           pretraining_epochs=10,pretrain_lr=0.01,
           finetune_lr = 0.001, learning_rate_anneal_ratio=1.0,
           L=15.0,
           isClassicalMomentum=True,
           momentum = 0)
    predVals = nn.predict(data)
    print 'test error rate = %f' % numpy.sqrt(numpy.mean((predVals - target)**2))
    
    model = lm.LinearRegression()
    model.fit(data, target)
    predVals = model.predict(data)
    print 'test error rate = %f' % numpy.sqrt(numpy.mean((predVals - target)**2))
    
if __name__ == '__main__':
    test()
    pass
