# Prepare the R session for a parallel run on Linux: move to the project
# directory and register a doMC backend so later %dopar% loops run on
# nbCores workers.
#
# nbCores: number of worker processes to register with doMC.
#
# NOTE(review): setwd() mutates session-wide state; the data files read
# later by run() are resolved relative to "~/lab3r", so this must be
# called before run() does any file I/O.
setupLinux <- function(nbCores)
{
  setwd("~/lab3r")
  library(doMC)          # provides registerDoMC (multicore backend for foreach)
  registerDoMC(nbCores)  # subsequent %dopar% loops use nbCores workers
}

run <- function(nbCores = 1)
{
  # Train MLP activity classifiers on windowed wearable-sensor features and
  # collect F1 scores (training and test) for each run, excluding the null
  # class.  Each of the nbCores parallel workers trains one model on its own
  # shuffle/split of the data, so the total number of models per call is
  # number.of.tests * nbCores.
  #
  # nbCores: number of parallel workers registered with doMC.  The default
  #          of 1 reproduces the original single-core behaviour.
  setupLinux(nbCores = nbCores)

  #************* TO MODIF **************
  mHidden <- 400
  # NOTE(review): column 8 (legGyroSD) is absent from the mapping — confirm
  # that this feature group is intentionally excluded.
  mMapping <- c(1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12)
  mNbTest <- 1
  mMaxit <- 325
  mLfc <- c(0.01, 0.7)
  #*************************************

  source("f_measures.R")   # defines f.measure()
  library("RSNNS")

  window <- 20                                    #window length
  hidden <- mHidden
  # Each column groups the three axes (x, y, z) of one sensor statistic in
  # the features file.
  number.index.mapping <- cbind(  c(1,3,5),       #[,1] wristAccelMean
                                  c(2,4,6),       #[,2] wristAccelSD
                                  c(7,9,11),      #[,3] wristGyroMean
                                  c(8,10,12),     #[,4] wristGyroSD
                                  c(13,15,17),    #[,5] legAccelMean
                                  c(14,16,18),    #[,6] legAccelSD
                                  c(19,21,23),    #[,7] legGyroMean
                                  c(20,22,24),    #[,8] legGyroSD
                                  c(25,27,29),    #[,9] torsoAccelMean
                                  c(26,28,30),    #[,10] torsoAccelSD
                                  c(31,33,35),    #[,11] torsoGyroMean
                                  c(32,34,36) )   #[,12] torsoGyroSD

  # Read the class labels and the feature matrix; file names encode the
  # window length and the overlap (half a window).
  labels <- read.table(paste0("label_w", window, "_o", window / 2, ".tab"),
                       sep = "\t", header = FALSE)[, 1]
  features <- read.table(paste0("meanSD_w", window, "_o", window / 2, ".tab"),
                         sep = "\t", header = FALSE,
                         colClasses = rep("numeric", 12))
  # Flatten the selected mapping columns into plain feature-column indices.
  features.selected.index <- as.numeric(number.index.mapping[, mMapping])

  number.of.tests <- mNbTest
  # total number of tests = number.of.tests * nbCores

  library(foreach)

  results <- data.frame(training = NULL, test = NULL)

  for (test.index in seq_len(number.of.tests)) {
    print(paste("Test number:", test.index))

    ##############################################################################
    # One independent shuffle + train/test split per worker.
    # BUG FIX: the foreach loops previously iterated over `test.index`
    # (shadowing the outer loop variable) and indexed datasets[[1]] /
    # models[[1]] everywhere, so with nbCores > 1 every worker trained and
    # scored the SAME data.  Each worker now uses its own index `core`.
    datasets <- foreach(core = seq_len(nbCores)) %dopar%
    {
      new.order <- sample(nrow(features))
      features.shuffled <- features[new.order, features.selected.index]                     #shuffle the rows of the features dataset
      labels.shuffled <- decodeClassLabels(labels[new.order], valTrue=0.95, valFalse=0.05)  #shuffle the rows of the labels dataset
      dataset.split <- splitForTrainingAndTest(features.shuffled, labels.shuffled, ratio=0.15)  #split the dataset in 2 parts: training and test
      # Return only the normalized split.  The previous
      # c(dataset.normalized, dataset.split) duplicated every element name,
      # and the normalized copy always shadowed the raw one on $ access.
      normTrainingAndTestSet(dataset.split)
    }

    ##############################################################################
    # Train one MLP per worker, each on its own dataset.
    models <- foreach(core = seq_len(nbCores)) %dopar%
    {
      mlp(x = datasets[[core]]$inputsTrain,           #input data for training
          y = datasets[[core]]$targetsTrain,          #output data (targets) for training
          size = hidden,                              #number of neurons in the hidden layer
          learnFunc = "BackpropMomentum",             #type of learning
          learnFuncParams = mLfc,                     #parameters of the learning function (eta, mu)
          maxit = mMaxit,                             #maximum number of iterations
          inputsTest = datasets[[core]]$inputsTest,   #input data for testing
          targetsTest = datasets[[core]]$targetsTest) #output data (targets) for testing
    }
    plotIterativeError(models[[1]])                   #plots the evolution of the error through the training process

    ##############################################################################
    # Score each worker's model on its own training and test partitions.
    results.temp <- foreach(core = seq_len(nbCores), .combine = rbind) %dopar%
    {
      prediction <- predict(models[[core]], datasets[[core]]$inputsTrain)
      prediction.class <- apply(prediction, 1, which.max)
      labels.class <- apply(datasets[[core]]$targetsTrain, 1, which.max)
      # F1 score without the null class (class 1).
      temp.training <- f.measure(labels.class[labels.class != 1],
                                 prediction.class[labels.class != 1])

      prediction <- predict(models[[core]], datasets[[core]]$inputsTest)
      prediction.class <- apply(prediction, 1, which.max)
      labels.class <- apply(datasets[[core]]$targetsTest, 1, which.max)
      # F1 score without the null class (class 1).
      temp.test <- f.measure(labels.class[labels.class != 1],
                             prediction.class[labels.class != 1])

      c(temp.training, temp.test)
    }
    results <- rbind(results, results.temp)           #append this round's scores
  }

  print(results)
  #boxplot(results, notch=TRUE)                       #plot the results
  #grid()
}

# Execute the full experiment once and report how long it took.
elapsed <- system.time(replicate(1, run()))
print(elapsed)