runrandForAnalysis <- function(arrayData, rfsize, ntree2try, rowNames, rowClasses, niter, Rfolder)
{
  # Run `niter` tuned random forests over an array data set, aggregate OOB
  # error rates, variable importance (Gini) and per-sample class votes across
  # iterations, then fit a final forest using the median tuned mtry.
  #
  # arrayData  : predictor matrix/data.frame (samples x variables)
  # rfsize     : number of trees for the final forest (passed to runRandomForest)
  # ntree2try  : number of trees used while tuning mtry (passed to tuneRandomForest)
  # rowNames   : sample identifiers
  # rowClasses : class label per sample (two classes assumed below)
  # niter      : number of tuning iterations to aggregate over
  # Rfolder    : directory holding the helper scripts; output files go there too
  #
  # Returns list(sum_var, finalRF): sorted median variable importance across
  # iterations, and the final random forest object.
  # Side effects: setwd(Rfolder) (NOT restored for the caller) and several
  # write.table() calls into that directory.

  # Random Forest library
  library(randomForest)

  # Helper functions are kept as plain-text R sources in Rfolder
  print(Rfolder)
  setwd(Rfolder)  # NOTE(review): permanently changes the caller's working directory
  print(getwd())
  source("mydataFrame.txt")
  source("myRandomForest.txt")
  source("tuneRandomForest.txt")

  # Per-iteration results: [mtry, overall OOB, class-1 OOB, class-2 OOB]
  results <- mat.or.vec(niter, 4)

  # The data set as a classification-ready data frame (last column = class)
  myData <- mydataFrame(arrayData, rowNames, rowClasses)
  dd <- dim(myData)
  nvar <- dd[2] - 1
  nsp <- dd[1]

  # Seed columns so cbind() in the loop can grow these; the seed column (all
  # zeros) is skipped when summarising below.
  # TODO: make this flexible to handle more than two classes
  var_ind <- mat.or.vec(nvar, 1)
  votes_class1 <- mat.or.vec(nsp, 1)
  votes_class2 <- mat.or.vec(nsp, 1)

  for (i in seq_len(niter)) {
    # Tune the number of variables tried at each split. This assumes
    # tuneRandomForest ran tuneRF with doBest, so err.rate/importance/votes
    # are available on the returned forest.
    myData_rf <- tuneRandomForest(myData, ntree2try)
    print(myData_rf)

    results[i, 1] <- myData_rf$mtry
    results[i, 2] <- median(myData_rf$err.rate[, 1])
    results[i, 3] <- median(myData_rf$err.rate[, 2])
    results[i, 4] <- median(myData_rf$err.rate[, 3])

    # With tuneRF/doBest, importance is the mean decrease in Gini index
    tmp <- data.frame(myData_rf$importance[, 1])
    print(tmp)
    var_ind <- cbind(var_ind, tmp)

    # Votes for each sample in each class
    votes_class1 <- cbind(votes_class1, data.frame(myData_rf$votes[, 1]))
    votes_class2 <- cbind(votes_class2, data.frame(myData_rf$votes[, 2]))
  }

  # Summaries over ALL iterations.
  # BUG FIX: the original indexed results[i, k], i.e. only the last loop
  # iteration, so median/mean were degenerate single-value statistics and
  # sd() returned NA. Summarise whole columns instead.
  var2split  <- median(results[, 1])  # median tuned mtry, used for the final forest
  meanoob    <- mean(results[, 2])
  meanoobcl1 <- mean(results[, 3])
  meanoobcl2 <- mean(results[, 4])
  sdoob      <- sd(results[, 2])
  sdoobcl1   <- sd(results[, 3])
  sdoobcl2   <- sd(results[, 4])

  sum_oob <- rbind(var2split, meanoob, sdoob, meanoobcl1, sdoobcl1, meanoobcl2, sdoobcl2)
  # Renamed from `rownames` to avoid shadowing base::rownames
  oob_names <- c("var2split", "meanoob", "sdoob", "meanoobcl1", "sdoobcl1",
                 "meanoobcl2", "sdoobcl2")
  sum_oob <- data.frame(sum_oob, row.names = oob_names)

  # Most important variables for the classification: median Gini importance
  # per variable across iterations (column 1 is the zero seed column)
  di <- dim(var_ind)[2]
  print(var_ind)
  med_varind <- apply(var_ind[, 2:di], 1, median)
  sum_var <- sort(med_varind, decreasing = TRUE)

  # "Average" votes for each sample, over all forests grown in the loop
  # (again skipping the zero seed column)
  dj <- dim(votes_class1)[2]
  mean_votes_cl1 <- apply(votes_class1[, 2:dj], 1, mean)
  mean_votes_cl2 <- apply(votes_class2[, 2:dj], 1, mean)
  sd_votes_cl1 <- apply(votes_class1[, 2:dj], 1, sd)
  sd_votes_cl2 <- apply(votes_class2[, 2:dj], 1, sd)

  # Bind per-sample vote summaries
  summary <- cbind(mean_votes_cl1, sd_votes_cl1, mean_votes_cl2, sd_votes_cl2)

  # Run a final random forest with the median tuned mtry
  finalRF <- runRandomForest(myData, rfsize, var2split)

  # Write results to files (in Rfolder, because of the setwd above)
  write.table(sum_var, file = "important_vars.txt", sep = "\t", row.names = TRUE)
  write.table(summary, file = "avg_votes_sample.txt", sep = "\t", row.names = TRUE, col.names = TRUE)
  write.table(sum_oob, file = "summary_oob.txt", sep = "\t", row.names = TRUE)

  write.table(cbind(finalRF$predicted, summary), file = "predicted_class.txt", sep = "\t", row.names = TRUE, col.names = TRUE)
  write.table(finalRF$predicted, file = "predicted_class2.txt", sep = "\t", row.names = TRUE, col.names = TRUE)
  write.table(finalRF$confusion, file = "confusion_table.txt", sep = "\t", row.names = TRUE, col.names = TRUE)

  return(list(sum_var, finalRF))
}

## Deprecated commands, but useful in other instances
#results[i,1] = myData_rf$mtry
#results[i,2] = median(myData_rf$err.rate[,1])
#results[i,1] = mytry[mytry[,2]==min(mytry[,2]),1]
#results[i,2] = min(mytry[,2])
