/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/*                                                                               */
/*    Copyright (C) 2007 Open Microscopy Environment                             */
/*         Massachusetts Institute of Technology,                                */
/*         National Institutes of Health,                                        */
/*         University of Dundee                                                  */
/*                                                                               */
/*                                                                               */
/*                                                                               */
/*    This library is free software; you can redistribute it and/or              */
/*    modify it under the terms of the GNU Lesser General Public                 */
/*    License as published by the Free Software Foundation; either               */
/*    version 2.1 of the License, or (at your option) any later version.         */
/*                                                                               */
/*    This library is distributed in the hope that it will be useful,            */
/*    but WITHOUT ANY WARRANTY; without even the implied warranty of             */
/*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU          */
/*    Lesser General Public License for more details.                            */
/*                                                                               */
/*    You should have received a copy of the GNU Lesser General Public           */
/*    License along with this library; if not, write to the Free Software        */
/*    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA  */
/*                                                                               */
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/*                                                                               */
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* Written by:  Lior Shamir <shamirl [at] mail [dot] nih [dot] gov>              */
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/


#include "TrainingSet.h"

#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include <dirent.h>
#include <math.h>
// variadic
#include <stdarg.h>
// system errors
#include <errno.h>

#define MAX_SPLITS 100
#define MAX_SAMPLES 190000

/* global variable */
int print_to_screen=1;
#define MAX_ERROR_MESSAGE 512
char error_message[MAX_ERROR_MESSAGE]="";

/*
   Accumulates errors and warnings to be shown later
   N.B.: Variadic - use like printf
*/
/*
   Accumulates errors and warnings into the global error_message buffer,
   to be shown later by showError().
   N.B.: Variadic - use like printf.
   If errno is non-zero, the system error string is appended (": <strerror>")
   and errno is cleared.
   NOTE(review): error_message is declared with MAX_ERROR_MESSAGE but sized
   here with ERROR_MESSAGE_LNGTH (from TrainingSet.h) - confirm they agree.
*/
void catError (const char *fmt, ...) {
va_list ap;
size_t len_error_message = strlen(error_message);
char newline=0;
int nchars;

	// process the printf-style parameters
	va_start (ap, fmt);
	nchars = vsnprintf (error_message+len_error_message,ERROR_MESSAGE_LNGTH-len_error_message, fmt, ap);
	va_end (ap);
	// bug fix: vsnprintf returns the length that *would* have been written (or <0 on
	// encoding error).  Clamp so the offset never leaves the buffer; otherwise the
	// unsigned size argument below underflows to a huge value.
	if (nchars > 0) len_error_message += nchars;
	if (len_error_message >= ERROR_MESSAGE_LNGTH) len_error_message = ERROR_MESSAGE_LNGTH-1;

	if (errno != 0) {
	// Append any system error string
		// bug fix: guard against an empty buffer before reading the last character
		if ( len_error_message > 0 && *(error_message+len_error_message-1) == '\n' ) {
			len_error_message--;	// temporarily drop the newline; re-added after strerror
			newline = 1;
		}
		nchars = snprintf (error_message+len_error_message,ERROR_MESSAGE_LNGTH-len_error_message, ": ");
		if (nchars > 0) len_error_message += nchars;
		if (len_error_message >= ERROR_MESSAGE_LNGTH) len_error_message = ERROR_MESSAGE_LNGTH-1;
		// NOTE(review): assumes the XSI strerror_r() that fills the supplied buffer;
		// the GNU variant may leave the buffer untouched - confirm build flags.
		strerror_r(errno, error_message+len_error_message, ERROR_MESSAGE_LNGTH-len_error_message);
		len_error_message = strlen(error_message);
		if (newline) snprintf (error_message+len_error_message,ERROR_MESSAGE_LNGTH-len_error_message, "\n");
		errno = 0;
	}
}


/*
   displays an error message and optionally stops the program
   If stopping, shows the accumulated error_message generated by catError()
   N.B.: variadic.  First parameter is the stop flag, followed by parameters same as printf
*/
/*
   Displays an error message and optionally stops the program.
   If stopping, shows the accumulated error_message generated by catError()
   and terminates with a failure exit status.
   N.B.: variadic.  First parameter is the stop flag, followed by parameters same as printf.
   Always returns 0 so callers can write "return (showError(...));".
*/
int showError(int stop, const char *fmt, ...) {
va_list ap;
size_t len_error_message = strlen(error_message);
int nchars;

	if (stop) {
	// add the printf-style parameters to error_message
		if (fmt && *fmt) {
			va_start (ap, fmt);
			nchars = vsnprintf (error_message+len_error_message,ERROR_MESSAGE_LNGTH-len_error_message, fmt, ap);
			va_end (ap);
			// bug fix: clamp - vsnprintf returns the un-truncated length, which can
			// push the offset past the end of the buffer
			if (nchars > 0) len_error_message += nchars;
			if (len_error_message >= ERROR_MESSAGE_LNGTH) len_error_message = ERROR_MESSAGE_LNGTH-1;
		}

	// Append any system error string
		if (errno != 0) {
			// bug fix: guard against an empty buffer before reading the last character
			if ( len_error_message > 0 && *(error_message+len_error_message-1) == '\n' ) {
				*(error_message+len_error_message-1) = '\0';	// perror adds its own newline
			}
			perror(error_message);
	// No system error, but error_message has message
		} else if (*error_message != '\0') {	// implies len_error_message > 0
			fprintf(stderr,"%s",error_message);
			if ( *(error_message+len_error_message-1) != '\n' ) fprintf(stderr,"\n");

	// No system error, or error_message, so say something generic
		} else if (!fmt || (fmt && *fmt == '\0') ) {
			fprintf(stderr,"Fatal error - terminating.\n");
		}
		// bug fix: was exit(0), which reported success to the shell on a fatal error
		exit(EXIT_FAILURE);
	} else {
		// Error_message has message
		if (*error_message != '\0') {	// implies len_error_message > 0
			fprintf(stderr,"%s",error_message);
			if ( *(error_message+len_error_message-1) != '\n' ) fprintf(stderr,"\n");
		}
		// process the printf-style parameters to stderr
		if (fmt && *fmt) {
			va_start (ap, fmt);
			vfprintf (stderr, fmt, ap);
			va_end (ap);
		}
	}
	return(0);
}

/* Returns 1 if c is an ASCII decimal digit ('0'..'9'), 0 otherwise.
   NOTE(review): shadows/overloads the standard isdigit() from <ctype.h>;
   the name is kept unchanged so existing callers are unaffected. */
int isdigit(char c)
{
	if (c < '0') return(0);
	if (c > '9') return(0);
	return(1);
}

/* Seeds the C library pseudo-random generator (rand) with the current
   wall-clock time, so repeated runs produce different random splits. */
void randomize()
{
	srand( (unsigned) time(NULL) );
}

/*
check_split_params - checks parameters for consistency with regards to training/testing a given dataset.
Returns 1 on success, 0 upon failure.
*/
/*
check_split_params - checks parameters for consistency with regards to training/testing a given dataset.
Computes the per-class number of training and testing images, and the training
fraction to pass to TrainingSet::split().
  n_train_p, n_test_p - (output) images per class for training / testing.
  train_frac_p - (output) fraction of samples to use for training;
                 0.0 means "use the n_train/n_test counts" (balanced splits).
  dataset, testset - the training set and (optional, may be NULL) separate test set.
Returns 1 on success, 0 upon failure (errors are accumulated via catError(),
and dataset is deleted before returning 0).
*/
int check_split_params (int *n_train_p, int *n_test_p, double *train_frac_p, TrainingSet *dataset, TrainingSet *testset, int class_num, int samples_per_image, double split_ratio, int balanced_splits, int max_training_images, int max_test_images, int N) {
	int class_index;
	int max_balanced_samples,max_balanced_i;

	// Initialize what we will be returning
	*n_train_p = 0;
	*n_test_p = 0;
	*train_frac_p = 0.0; // bug fix: this output was never assigned, leaving the caller's variable uninitialized

	/*
	  Bounds checking on samples.
	  -i switch is in images, but training happens on samples.
	  -t (and -R) switches provide samples per image (tiles*tiles*rotations).
	  Enforce balanced classes, with option to force unbalanced classes
	    Calculate maximum number of training images (samples) for a balanced classifier (Ib = images in the smallest class)
	    -rN: ratio assumes balanced classes, force unbalanced with -r#.  Meaning is fraction of samples to be used for *testing*.
	    -iN: Number of training images used is min (iN,Ib). i#N drops classes with less than N images, resulting in a balanced classifier.
	         -r is ignored if both -i and -r are specified.
	    -jN: Parameter is ignored if a separate test .fit is supplied.  All samples in the test .fit are used for testing in each split
	         Balanced testing is enforced (jN = min (jN,Ib-iN).  Program terminates if jN <= 0).
	         Unbalanced testing can be done by using unbalanced test samples in the test .fit
	         If -r#, testing is still balanced (jN = min (jN,Ib-iN), where iN is rN*Ib)
	*/

	// Find the size (in samples) of the smallest class; this bounds balanced training.
	max_balanced_samples=MAX_SAMPLES;
	for (class_index = 1; class_index <= dataset->class_num; class_index++) {
		if (dataset->class_nsamples[class_index] < max_balanced_samples) {
			max_balanced_samples = dataset->class_nsamples[class_index];
		}
	}
	max_balanced_i = max_balanced_samples / samples_per_image;

	// Check provided parameters against balanced testing/training
	if (max_training_images > 0) { // N.B.: -i overrides -r
		if (max_training_images > max_balanced_i && testset) {
			catError("WARNING: Specified training images (%d) exceeds maximum for balanced training (%d).  %d images used for training.  Use -r# instead of -i to over-ride balanced training.\n",
				max_training_images,max_balanced_i,max_balanced_i);
			max_training_images = max_balanced_i;
		} else if (max_training_images >= max_balanced_i && testset == NULL) { // No images left for testing unless we have a test .fit
			catError("ERROR: Specified training images (%d) exceeds maximum for balanced training (%d).  No images left for testing.\n",max_training_images,max_balanced_i-1);
			catError("  Use -iN with N < %d.\n",max_balanced_i);
			catError("  Or, use -rN with 1.0 < N > 0.0\n");
			catError("  Or, use -r#N to over-ride balanced training.\n");
			delete dataset;
			return (0);
		}
		split_ratio = 0.0; // Don't base splits on a ratio - use max_training_images/max_test_images
	} else { // -i unspecified, use split_ratio (default or specified - already set in main)
		max_training_images = floor( (split_ratio * (float)max_balanced_i) + 0.5 ); // rounding
		if (max_training_images >= max_balanced_i && testset == NULL) { // rounding error left no test images
			catError("ERROR: No images left for testing using specified -r=%f.\n",split_ratio);
			catError("  Use -rN with N < %f\n", ((float)max_balanced_i - 1.0) / (float)max_balanced_i);
			catError("  Or, use -iN with N < %d.\n",max_balanced_i);
			catError("Exiting - no testing performed\n");
			delete dataset;
			return (0);
		}
		// If an exact split ratio was specified, then use it.
		// Otherwise, the same number of training images is used in each class, specified by max_training_images
		if (balanced_splits) split_ratio = 0.0;
	}
	// Note that checks above leave max_training_images < max_balanced_i

	if (max_test_images > 0) { // -jN specified
		if (testset) {
			catError("WARNING: The -j%d parameter is ignored when a test set is specified (%s).\n",max_test_images,testset->source_path);
		} else if ( max_test_images > (max_balanced_i - max_training_images) ) { // -jN always balanced unless test .fit
			if (max_balanced_i - max_training_images > 0) {
				catError("WARNING: Insufficient images for balanced training (%d) and specified testing (%d).  %d images used for testing.\n",
					max_training_images,max_test_images,max_balanced_i - max_training_images);
				max_test_images = max_balanced_i - max_training_images;
			}
		}
	} else { // -jN not specified
		max_test_images = max_balanced_i - max_training_images;
	}

	// Set the return values
	*n_train_p = max_training_images;
	*n_test_p = max_test_images;
	*train_frac_p = split_ratio; // bug fix: report the (possibly adjusted) ratio; 0.0 => balanced split by counts
	return (1);

}


/*
split_and_test - repeatedly splits a dataset into training/testing sets (or uses a
supplied test set), trains and tests a classifier on each split, and prints or
reports the per-split and average results.
Returns 1 on success; 0 or a negative value on failure (errors via catError/showError).
*/
int split_and_test(TrainingSet *ts, char *report_file_name, int class_num, int method, int samples_per_image, double split_ratio, int balanced_splits, double max_features, double used_mrmr, long split_num,
	int report,int max_training_images, int exact_training_images, int max_test_images, char *phylib_path,int phylip_algorithm,int export_tsv,
	long first_n, char *weight_file_buffer, char weight_vector_action, int N, TrainingSet *testset, int ignore_group, int tile_areas, int max_tile, int image_similarities, int random_splits) {
	TrainingSet *train,*test,**TilesTrainingSets=NULL;
	data_split splits[MAX_SPLITS];
	char dataset_name[128],group_name[64];
	FILE *output_file;
	int split_index,tile_index;
	int n_train,n_test;
	double train_frac=0.0;           /* bug fix: initialize in case check_split_params leaves it unset */
	int class_index;
	int res;

	// Remove classes from the end if N is specified
	if (N>0) while (ts->class_num>N) ts->RemoveClass(ts->class_num);

	// Check the parameters and set train/test image numbers
	if (!check_split_params (&n_train, &n_test, &train_frac, ts, testset,
		class_num, samples_per_image, split_ratio, balanced_splits, max_training_images, max_test_images, N))
			return(showError(1, NULL));

	// If a testset was specified, make sure its classes are consistent with ts.
	if (testset) {
		for (class_index = 1; class_index <= ts->class_num; class_index++) {
			// keep dropping (marking UNKNOWN) mismatched test classes until the labels line up
			while ( testset->class_num && strcmp(ts->class_labels[class_index],testset->class_labels[class_index]) ) {
				catError ("WARNING: Test set class label '%s' does not match any training set class.  Marked UNKNOWN.\n",testset->class_labels[class_index]);
				testset->MarkUnknown (class_index);
			}
		}
		// mark any extra classes as unknown
		for (class_index = ts->class_num+1; class_index <= testset->class_num;class_index++) {
			catError ("WARNING: Test set class label '%s' does not match any training set class.  Marked UNKNOWN.\n",testset->class_labels[class_index]);
			testset->MarkUnknown (class_index);
		}

		// Now that they're consistent, remove classes based on N.
		if (N>0) while (testset->class_num > N) testset->RemoveClass(testset->class_num); /* cut the number of classes also in the test file */
		n_test = testset->count / samples_per_image;
	}

/*
	In ts->split(),
      if (test->count > 0) number_of_test_samples = 0; // test already has samples from a file
      // if ratio is 0, use the max_train_samples and max_test_samples (balanced training and testing)
      // if ratio is > 0 and <= 1, use ratio (unbalanced training)      
*/
	if (print_to_screen) printf ("samples per image=%d, training images: %d, testing images %d training fraction=%f\n",samples_per_image,n_train,n_test,train_frac);
     for (split_index=0;split_index<split_num;split_index++)
     { double accuracy;
       double feature_weight_distance=-1.0;

       train=new TrainingSet(ts->count,ts->class_num);
       if (testset) test = testset;
       else test=new TrainingSet(ts->count,ts->class_num);
       // per-split result storage (freed in the cleanup loop at the bottom)
       splits[split_index].confusion_matrix=new unsigned short[(ts->class_num+1)*(ts->class_num+1)];
       splits[split_index].similarity_matrix=new double[(ts->class_num+1)*(ts->class_num+1)];
       splits[split_index].similarity_normalization=new double[ts->class_num+1];
       splits[split_index].feature_names=new char[ts->signature_count*80];
       if (tile_areas)
       {  splits[split_index].tile_area_accuracy=new double[samples_per_image];
          for (tile_index=0;tile_index<samples_per_image;tile_index++) splits[split_index].tile_area_accuracy[tile_index]=0.0;
       }
       else splits[split_index].tile_area_accuracy=NULL;
       if (ts->signature_count>2500) splits[split_index].feature_groups=new char[ts->signature_count*80];
       else splits[split_index].feature_groups=NULL;

       res=ts->split(random_splits,train_frac,train,test,samples_per_image,n_train,n_test,exact_training_images);
       catError (ts->error_message);
       if ( res < 0) return (res);
       if (image_similarities) splits[split_index].image_similarities=new double[(1+test->count/(samples_per_image))*(1+test->count/(samples_per_image))];
       else splits[split_index].image_similarities=NULL;

       if (tile_areas)  /* split into several datasets such that each dataset contains tiles of the same location */
       {  TilesTrainingSets=new TrainingSet*[samples_per_image];
          res = train->SplitAreas(samples_per_image, TilesTrainingSets);
          catError (train->error_message);
          if (res < 0) return (res);
          for (tile_index=0;tile_index<samples_per_image;tile_index++)
          {  TilesTrainingSets[tile_index]->normalize();
             TilesTrainingSets[tile_index]->SetFisherScores(max_features,used_mrmr,NULL);
          }
       }
	   else
       {  train->normalize();                                           /* normalize the feature values of the training set */
          train->SetFisherScores(max_features,used_mrmr,&(splits[split_index]));  /* compute the Fisher Scores for the image features */
       }
       if (weight_vector_action=='w')
         if(!train->SaveWeightVector(weight_file_buffer))
           showError(1,"%sCould not write weight vector to '%s'\n",train->error_message,weight_file_buffer);
       if (weight_vector_action=='r' || weight_vector_action=='+' || weight_vector_action=='-')
       {  feature_weight_distance=train->LoadWeightVector(weight_file_buffer,(weight_vector_action=='+')-(weight_vector_action=='-'));
          if (tile_areas) for (tile_index=0;tile_index<samples_per_image;tile_index++) feature_weight_distance=TilesTrainingSets[tile_index]->LoadWeightVector(weight_file_buffer,(weight_vector_action=='+')-(weight_vector_action=='-'));	   
          if (feature_weight_distance<0) showError(1,"%sCould not load weight vector from '%s'\n",train->error_message,weight_file_buffer);
       }
       if (report) splits[split_index].individual_images=new char[(int)((test->count/(samples_per_image))*(class_num*15))];
       else splits[split_index].individual_images=NULL;
       if (ignore_group)   /* assign to zero all features of the group */
       {  if (!(ts->IgnoreFeatureGroup(ignore_group,group_name))) {
			/* bug fix: new[] allocations must be released with delete[] (was plain delete) */
			if (!testset) delete test;
			delete [] splits[split_index].confusion_matrix;
			delete [] splits[split_index].similarity_matrix;
			delete [] splits[split_index].similarity_normalization;
			delete [] splits[split_index].feature_names;
			if (splits[split_index].feature_groups) delete [] splits[split_index].feature_groups;
			if (splits[split_index].individual_images) delete [] splits[split_index].individual_images;
			if (splits[split_index].tile_area_accuracy) delete [] splits[split_index].tile_area_accuracy;
			if (splits[split_index].image_similarities) delete [] splits[split_index].image_similarities;
			/* bug fix: showError reads train->error_message, so train must still be live here
			   (train was previously deleted before this call - use-after-free).
			   showError(1,...) terminates the program, so the statements below are for clarity. */
			showError(1,"%sErrors while trying to ignore group %d '%s'\n",train->error_message,ignore_group,group_name);
			delete train;
			return(0);
		  }
       }

	// Label the columns
		if (print_to_screen) {
			printf("image\t");
			if (ts->is_continuous) { /* continouos values */
				printf("act. val.\tpred. val.\n");
			} else {
				printf ("norm. fact.\t");
				for (class_index=1;class_index<=ts->class_num;class_index++) {
					printf("p(%s)\t",ts->class_labels[class_index]);
				}
				printf("act. class\tpred. class\n");
			}
		}

       accuracy=train->Test(test,method,samples_per_image,tile_areas,TilesTrainingSets,max_tile,first_n,&(splits[split_index]));

       splits[split_index].feature_weight_distance=feature_weight_distance;
       splits[split_index].accuracy=accuracy;
       splits[split_index].method=method;
       splits[split_index].pearson_coefficient=test->pearson(samples_per_image,&(splits[split_index].avg_abs_dif),&(splits[split_index].pearson_p_value));

       if (!report && !ignore_group)   /* print the accuracy and confusion and similarity matrices */
       {  ts->PrintConfusion(stdout,splits[split_index].confusion_matrix,NULL);
          ts->PrintConfusion(stdout,NULL,splits[split_index].similarity_matrix);
          if (ts->is_continuous) printf("Pearson Correlation: %f \n\n",splits[split_index].pearson_coefficient);
		  else printf("\nAccuracy: %f \n\n",accuracy);
       }

     if (TilesTrainingSets)    /* delete the training sets allocated for the different areas */
     {  for (tile_index=0;tile_index<samples_per_image;tile_index++)
           delete TilesTrainingSets[tile_index];
        delete [] TilesTrainingSets;	/* bug fix: allocated with new[] */
        TilesTrainingSets=NULL;	/* reset so the next split iteration re-allocates */
     }
	 delete train;
	 if (!testset) delete test;
    } 
    printf("\n\n");
	if (!report)    /* print the average accuracy */
	{  int split_index;
       double avg_accuracy=0,avg_pearson=0;
       for (split_index=0;split_index<split_num;split_index++) avg_accuracy+=splits[split_index].accuracy;
       for (split_index=0;split_index<split_num;split_index++) avg_pearson+=splits[split_index].pearson_coefficient;       
       if (ignore_group) printf("Accuracy assessment without using feature group '%s' - ",group_name); 
       if (ts->is_continuous) printf("Average Pearson Correlation (%ld splits): %f\n",split_num,avg_pearson/(double)split_num);
       else printf("Average accuracy (%ld splits): %f\n",split_num,avg_accuracy/(double)split_num);
	}

    strcpy(dataset_name,ts->source_path);
    if (strrchr(dataset_name,'.')) *strrchr(dataset_name,'.')='\0';
    if (strrchr(dataset_name,'/'))   /* extract the file name */
    {  char buffer[128];
       strcpy(buffer,&(strrchr(dataset_name,'/')[1]));
       strcpy(dataset_name,buffer);
    }
    if (report)
    {  if (report_file_name)
	   {  if (!strchr(report_file_name,'.')) strcat(report_file_name,".html");
	      output_file=fopen(report_file_name,"w");
	      if (!output_file) showError(1, "Could not open file for writing '%s'\n",report_file_name);
	   }
	   else output_file=stdout;     
	   /* bug fix: testset may be NULL - was dereferenced unconditionally */
	   ts->report(output_file,report_file_name,dataset_name,splits,split_num,samples_per_image,n_train,phylib_path,phylip_algorithm,export_tsv,testset?testset->source_path:NULL,image_similarities);   
	   if (output_file!=stdout) fclose(output_file);
	   /* copy the .ps and .jpg of the dendrogram to the output path of the report and also copy the tsv files */
	   /* bug fix: report_file_name may be NULL when reporting to stdout - guard before strcpy */
	   if ((export_tsv || phylib_path) && report_file_name)
	   {  char command_line[512],ps_file_path[512];
            char *last_slash;
            strcpy(ps_file_path,report_file_name);
            last_slash = strrchr(ps_file_path,'/');
            if (last_slash) last_slash[1]='\0';	/* bug fix: strrchr returns NULL for a bare filename */
            else strcpy(ps_file_path,"./");
            /* NOTE(review): paths are interpolated into shell commands un-escaped;
               paths containing spaces or shell metacharacters will misbehave */
            if (phylib_path && (strchr(phylib_path,'/'))) 
            {  sprintf(command_line,"mv ./%s*.ps %s",dataset_name,ps_file_path);
               system(command_line);
               sprintf(command_line,"mv ./%s*.jpg %s",dataset_name,ps_file_path);
               system(command_line);
            }
            if (export_tsv)
            {  sprintf(command_line,"cp -r ./tsv %s",ps_file_path);
               system(command_line);		  
               sprintf(command_line,"rm -r ./tsv");
               system(command_line);
            }
	   }
    }

    /* free the per-split result storage */
    for (split_index=0;split_index<split_num;split_index++)
    {  /* bug fix: new[] allocations must be released with delete[] (was plain delete) */
       delete [] splits[split_index].confusion_matrix;	
       delete [] splits[split_index].similarity_matrix;
       delete [] splits[split_index].similarity_normalization;	/* bug fix: was leaked */
       if (splits[split_index].feature_names) delete [] splits[split_index].feature_names;
       if (splits[split_index].feature_groups) delete [] splits[split_index].feature_groups;
       if (splits[split_index].individual_images) delete [] splits[split_index].individual_images;
       if (splits[split_index].tile_area_accuracy) delete [] splits[split_index].tile_area_accuracy;
       if (splits[split_index].image_similarities) delete [] splits[split_index].image_similarities;	   
    }

    return(1);
}


void ShowHelp()
{
   printf("\n"PACKAGE_STRING"\nLaboratory of Genetics/NIA/NIH \n");
   printf("usage: \n======\nwndchrm [ train | test | classify ] [-mtslcdowfrijnpqvNSBACDTh] [<dataset>|<train set>] [<test set>|<feature file>] [<report_file>]\n");
   printf("  <dataset> is a <root directory>, <feature file>, <file of filenames>, <image directory> or <image filename>\n");
   printf("  <root directory> is a directory of sub-directories containing class images with one class per sub-directory.\n");
   printf("      The sub-directory names will be used as the class labels. Currently supported file formats: TIFF, PPM. \n");
   printf("  <feature file> is the file generated by the train command containing all computed image features (should end in .fit).\n");
   printf("       This filename is a required parameter for storing the output of 'train'\n");       
   printf("  <file of filenames> is a text file listing <image filename>s and corresponding class labels\n");
   printf("      separated by a <TAB> character (a tab delimited file, or .tsv). Lines beginning with '#' are ignored\n");       
   printf("  <image directory> is a directory of image files. The class cannot be specified so these can only be used as a <test set>.\n");    
   printf("  <image filename> is the full path to an image file. The classes cannot be specified so these can only be used as a <test set>.\n");    
   printf("  <train set> is anything that qualifies as a <dataset>, but must contain at least two (2) defined classes.\n");
   printf("      An <image filename> or <image directory> cannot define classes.\n");
   printf("  <test set> is anything that qualifies as a <dataset>.  The <train set> will be used to classify the <test set>.\n");
   printf("      This parameter is required for 'classify' and is optional for 'test' (when doing internal tests of the <train set>\n");
   printf("  <report_file> is a report of the test/classify results in html format (must end in .htm or .html).\n");
   printf("\noptions:\n========\n");
   printf("m - Allow running multiple instances of this program concurrently, save (and re-use) pre-calculated .sig files.\n");
   printf("    This will save and re-use .sig files, making this option useful for single instances/processors as well\n");
   printf("R - Add rotations to training images (0,90,180,270 degrees).\n");
   printf("t[#][^]N - split the image into NxN tiles. The default is 1. If the '#' is specified, each tile location is used as\n");
   printf("           a seperate dataset (for testing only). If '^' is specified only the closest tile is used. \n");
   printf("l - Use a large image feature set.\n");
   printf("c - Compute color features.\n");
   printf("dN - Downsample the images (N percents, where N is 1 to 100)\n");
   printf("s - silent mode.\n");
   printf("o - force overwriting pre-computed .sig files.\n");   
   printf("w - Classify with wnn instead of wnd. \n");
   printf("fN[:M] - maximum number of features out of the dataset (0,1) . The default is 0.15. \n");
   printf("r[#]N - Fraction of images/samples to be used for training (0,1). The default is 0.75 of\n");
   printf("        the smallest class. if '#' is specified, force unbalanced training\n");
   printf("i[#]N - Set a maximal number of training images (for each class). If the '#' is specified then\n");
   printf("        the class is ignored if it doesn't have at least N samples.\n");
   printf("jN - Set a maximal number of test images (for each class). \n");
   printf("nN - Number of repeated random splits. The default is 1.\n");
   printf("p[+][k][#][path] - Output a full report in HTML format. 'path' is an optional path to phylip root dir\n");
   printf("   for generating dendrograms. The optinal '+' creates a directory and exports the data into tsv files.\n");
   printf("   'k' is an optional digit (1..3) of the specific phylip algorithm to be used. '#' generates a map of the test images\n");
   printf("qN - the number of first closest classes among which the presence of the right class is considered a match.\n");
   printf("v[r|w|+|-][path] - read/write the feature weights into a file.\n");   
   printf("Nx - set the maximum number of classes (use only the first x classes).\n");
   printf("Sx[:y] - normalize the images such that the mean is set to x and (optinally) the stddev is set to y.\n");   
   printf("Bx,y,w,h - compute features only from the (x,y,w,h) block of the image.\n");      
   printf("A - assess the contribution of each group of image features independently.\n");
   printf("C - *highly experimental* perform interpolation on a continuous scale rather than discrete classes\n");
   printf("    All class labels must be interpretable as numbers.\n");
   printf("D[path] - feature file name (.fit file) to save the <dataset> or <train set>.\n");
   printf("T[path] - feature file name (.fit file) to save the <test set>.\n");
   printf("h - show this note.\n\n");
   printf("examples:\n=========\n");
   printf("train:\n");
   printf("  wndchrm train /path/to/dataset/ dataset.fit\n");
   printf("  wndchrm train -mcl /path/to/dataset/ testset.fit\n");
   printf("test:\n");
   printf("  wndchrm test -f0.1 dataset.fit\n");
   printf("  wndchrm test -f0.1 -r0.9 -n5 dataset.fit testset.fit\n");
   printf("  wndchrm test -f0.2 -i50 -j20 -n5 -p/path/to/phylip3.65 dataset.fit testset.fit report.html\n");
   printf("  N.B.: By default, the -r or -i parameters will be used to make a balanced training set (equal number of images per class).\n");
   printf("       -r#N can be used to override this default, so that the N fraction of each class will be used for training.\n");
   printf("       If a <test set> is specified, it will be used as the test set for each 'split', but training images will\n");
   printf("       still be randomly chosen from <train set>)\n");
   printf("classify:\n");
   printf("   wndchrm classify dataset.fit /path/to/image.tiff\n");
   printf("   wndchrm classify -f0.2 -cl /path/to/root/dir /path/to/image/directory/\n");
   printf("   wndchrm classify -f0.2 -cl -Ttestset.fit dataset.fit /path/to/image/file_of_filenames.tsv\n");
   printf("   N.B.: classify will use -r or -i to train with fewer than all of the images in <dataset>\n");
   printf("       Unlike 'test', 'classify' will chose the training images in order rather than randomly.\n");
   printf("       classify will ignore the -n parameter because the result will be the same for each run or split.\n");
   printf("       The default -r for 'classify' is 1.0 rather than the 0.75 used in 'test'.\n");
   printf("\nAdditional help:\n================\n");
   printf("A detailed description can be found in: Shamir, L., Orlov, N., Eckley, D.M., Macura, T., Johnston, J., Goldberg, I.\n");
   printf("  Wndchrm - an open source utility for biological image analysis, BMC Source Code for Biology and Medicine, 3:13, 2008.\n");   
   printf("\nIf you have more questions about this software, please email me (Ilya Goldberg) at <igg [at] nih [dot] gov> \n\n");
   return;
}


/*
  wndchrm command-line entry point.
  argv[1] selects the command ("train", "test" or "classify"); the switches
  that follow are single-letter options packed into one token each (e.g.
  "-t5", "-r#0.5", "-Ttestset.fit").  After validation, the training set is
  loaded from the first positional path; "train" saves the computed features
  to a .fit file, while "test"/"classify" run split_and_test() on the data,
  optionally against a separate test set and/or writing an HTML report.
  Returns 1 on completion or usage error (historical convention of this tool).
*/
int main(int argc, char *argv[])
{   char *dataset_path=NULL, *testset_path=NULL;
    int multi_processor=0;
    int arg_index=1;
    int tiles=1;
    int rotations=1;
    int tile_areas=0;
    int method=1;
    int report=0;
    int splits_num=1;
    int large_set=0;
    int colors=0;
    int downsample=100;
    double split_ratio=0.75;
    double max_features=0.15;
    double used_mrmr=0.0;
    int max_training_images=0;
    int max_test_images=0;
    int train=0;
    int test=0;
    int classify=0;
    char phylib_path_buffer[256];
    char *phylib_path=NULL;
    char report_file_buffer[256];
    char *report_file=NULL;
    int export_tsv=0;
    int phylip_algorithm=0;
    int exact_training_images=0;
    long first_n=1;
    char weight_file_buffer[256];
    char weight_vector_action='\0';
    char *test_set_path=NULL;
    int N=0;                         /* use only the first N classes                               */
    double mean=-1;                  /* normalize all images to a specified mean                   */
    double stddev=-1;                /* normalize all images to a specified standard deviation     */
    int assess_features=0;           /* assess the contribution of each feature to the performance */
    rect bounding_rect={-1,-1,-1,-1};/* a bounding rect from which features should be computed     */
    int image_similarities=0;        /* generate a dendrogram showing the similarity of the images */
    int max_tile=0;                  /* use only the closest tile                                  */
	int overwrite=0;                 /* force overwriting of pre-computed .sig files               */
	char *dataset_save_fit=NULL;     /* path to save the dataset/train set                         */
	char *testset_save_fit=NULL;     /* path to save the test set                                  */
	char *char_p,*char_p2;
	int balanced_splits=1;           /* when 1, use balanced training.  Override with -r#          */
	int random_splits=1;             /* when 1 randomly chose training images, when 0 add them in read order */
	int do_continuous=0;
    /* read parameters */
    if (argc<2)
    {  ShowHelp();
       return(1);
    }

    /* the first argument selects the command; each command sets its own split defaults */
    if (strcmp(argv[arg_index],"train")==0) train=1;
    if (strcmp(argv[arg_index],"test")==0) {
    	test=1;
    	split_ratio = 0.75;
    	random_splits = 1;
    }
    if (strcmp(argv[arg_index],"classify")==0) {
    	classify=1;
    	split_ratio = 1.0;
    	random_splits = 0; // use order in the input file
    }
    if (!train && !test && !classify)
    {  ShowHelp();
       return(1);
    }
    arg_index++;

	/* read the switches.
	   BUGFIX: bound arg_index by argc so a trailing switch with no paths after it
	   (e.g. "wndchrm test -m") does not read past the end of argv. */
    while (arg_index<argc && argv[arg_index][0]=='-')
    {   char *p,arg[32];
	    /* -p[+][N][#][path] : '+' also exports tsv files, a digit selects the phylip
	       algorithm, '#' adds an image-similarity dendrogram, and a trailing
	       absolute or relative path locates the phylip installation */
	    if (argv[arg_index][1]=='p')
        {  report=1;
		   if ((strchr(argv[arg_index],'p')[1])=='+') export_tsv=1;
		   if (isdigit(strchr(argv[arg_index],'p')[1+export_tsv])) phylip_algorithm=(strchr(argv[arg_index],'p')[1+export_tsv])-'0';
		   image_similarities=((strchr(argv[arg_index],'p')[1+export_tsv+(phylip_algorithm>0)])=='#');
           if ((strchr(argv[arg_index],'p')[1+export_tsv+image_similarities+(phylip_algorithm>0)])=='/' || (strchr(argv[arg_index],'p')[1+export_tsv+image_similarities+(phylip_algorithm>0)])=='.')
		   {   /* BUGFIX: bounded copy -- a long path no longer overflows the 256-byte buffer */
		       snprintf(phylib_path_buffer,sizeof(phylib_path_buffer),"%s",&(strchr(argv[arg_index],'p')[1+export_tsv+image_similarities+(phylip_algorithm>0)]));
               phylib_path=phylib_path_buffer;
		   }
		   if (phylip_algorithm<=0) phylip_algorithm=1;   /* set the default */
		   arg_index++;
		   continue;	/* so that the path will not trigger other switches */
        }
		/* -v{r|w|+|-}file : read/write/add/subtract a feature-weight vector file */
		if (argv[arg_index][1]=='v' && strlen(argv[arg_index])>3)
		{  weight_vector_action=argv[arg_index][2];
		   if (weight_vector_action!='r' && weight_vector_action!='w' && weight_vector_action!='+' && weight_vector_action!='-')
		     showError(1,"Unspecified weight vector action (-v switch)\n");
		   /* BUGFIX: bounded copy of the weight-vector file path */
		   snprintf(weight_file_buffer,sizeof(weight_file_buffer),"%s",&(strchr(argv[arg_index],'v')[2]));
		   arg_index++;
		   continue;   /* so that the path will not trigger other switches */
		}
        /* -Bx,y,w,h : a bounding rect for computing features */
        if (strchr(argv[arg_index],'B'))   
        {  snprintf(arg,sizeof(arg),"%s",argv[arg_index]);  /* BUGFIX: bounded copy into arg[32] */
           /* BUGFIX: guard each token -- atoi(NULL) crashed when fewer than 4 numbers were given */
           p=strtok(arg," ,;");
           if (p) bounding_rect.x=atoi(p);
           p=strtok(NULL," ,;");
           if (p) bounding_rect.y=atoi(p);
           p=strtok(NULL," ,;");
           if (p) bounding_rect.w=atoi(p);
           p=strtok(NULL," ,;");
           if (p) bounding_rect.h=atoi(p);
		}
        /* -Smean[:stddev] : mean and standard deviation for normalizing the images */
        if (strchr(argv[arg_index],'S'))   
        {  snprintf(arg,sizeof(arg),"%s",argv[arg_index]);  /* BUGFIX: bounded copy into arg[32] */
		   p=strchr(arg,':');
		   if (p)                          /* standard deviation is specified */
		   {  stddev=atof(&(p[1]));
              *p='\0';
		   }
		   mean=atof(&(strchr(arg,'S')[1]));   /* mean */
        }
	    if (strchr(argv[arg_index],'m')) multi_processor=1;
        if (strchr(argv[arg_index],'n')) splits_num=atoi(&(strchr(argv[arg_index],'n')[1]));
        if (strchr(argv[arg_index],'s')) print_to_screen=0;
        if (strchr(argv[arg_index],'o')) overwrite=1;
        if (strchr(argv[arg_index],'l')) large_set=1;
        if (strchr(argv[arg_index],'c')) colors=1;
        if (strchr(argv[arg_index],'C')) do_continuous=1;
        if (strchr(argv[arg_index],'d')) downsample=atoi(&(strchr(argv[arg_index],'d')[1]));
        /* -fN[:M] : fraction of features to use, optionally followed by the mRMR fraction */
        if ((char_p = strchr(argv[arg_index],'f'))) {
            if ((char_p2=strchr(char_p,':'))) used_mrmr=atof(char_p2+1);
		    max_features=atof(char_p+1);
		}
        /* -r[#]N : training fraction; '#' disables balanced (equal-per-class) splits */
        if ((char_p = strchr(argv[arg_index],'r'))) {
			if (*(char_p+1)=='#') {
				balanced_splits = 0;
				char_p++;
			}
           split_ratio=atof(char_p+1);
        }
        if (strchr(argv[arg_index],'q')) first_n=atoi(&(strchr(argv[arg_index],'q')[1]));
        if (strchr(argv[arg_index],'N')) N=atoi(&(strchr(argv[arg_index],'N')[1]));
        if (strchr(argv[arg_index],'A')) assess_features=200; 
        if (strchr(argv[arg_index],'R')) rotations=4;
        /* -t[#[^]]N : N tiles per image side; '#' reports per-tile areas, '^' keeps only the closest tile */
        if ((char_p = strchr(argv[arg_index],'t'))) {
			if (*(char_p+1)=='#') {
				tile_areas = 1;
				char_p++;
				if (*(char_p+1)=='^') {
					max_tile = 1;
					char_p++;
				}
			}
           tiles=atoi(char_p+1);
       }
        /* -i[#]N : max training images per class; '#' requires exactly N */
        if ((char_p = strchr(argv[arg_index],'i'))) {
			if (*(char_p+1)=='#') {
				exact_training_images = 1;
				char_p++;
			}
           max_training_images=atoi(char_p+1);
        }
        if (strchr(argv[arg_index],'j')) max_test_images=atoi(&(strchr(argv[arg_index],'j')[1]));
        if (strchr(argv[arg_index],'w')) method=0;
        if (strchr(argv[arg_index],'h'))
        {  ShowHelp();
           return(1);
        }
		if (argv[arg_index][1]=='D') {
			/* BUGFIX: skip the two characters "-D" -- previously only "-" was skipped,
			   leaving a stray 'D' at the start of the saved-fit path */
			dataset_save_fit = argv[arg_index]+2;
	    	arg_index++;
			continue;	/* so that the path will not trigger other switches */
		}
		if (argv[arg_index][1]=='T') {
			/* BUGFIX: skip the two characters "-T" (same off-by-one as -D above;
			   the help text shows the form "-Ttestset.fit") */
			testset_save_fit = argv[arg_index]+2;
	    	arg_index++;
			continue;	/* so that the path will not trigger other switches */
		}


        arg_index++;
     }

/* check that the values in the switches are correct */
	if (test && splits_num<=0) showError(1,"splits number (n) must be an integer greater than 0");
	/* BUGFIX: messages now match the tests -- 0 is accepted for -i/-j (it means "no limit") */
	if (test && max_training_images<0) showError(1,"Maximal number of training images (i) must be a non-negative integer");
	if (test && max_test_images<0) showError(1,"maximal number of test images (j) must be a non-negative integer");
	if (test && report && arg_index==argc-1) showError(1,"a report html file must be specified");
	if (tiles<=0) showError(1,"number of tiles (t) must be an integer greater than 0");
	if (downsample<1 || downsample>100) showError(1,"downsample size (d) must be an integer between 1 to 100");
	/* BUGFIX: message now matches the test -- the endpoints are accepted
	   (classify uses a default split_ratio of exactly 1.0) */
	if (split_ratio<0 || split_ratio>1) showError(1,"training fraction (r) must be between 0 and 1");
	if (splits_num<1 || splits_num>MAX_SPLITS) showError(1,"splits num out of range");
	if (weight_vector_action!='\0' && weight_vector_action!='r' && weight_vector_action!='w' && weight_vector_action!='-' && weight_vector_action!='+') showError(1,"-v must be followed with either 'w' (write) or 'r' (read) ");

	 /* run */
	randomize();   /* random numbers are used for selecting random samples for testing and training */	 
	if (arg_index<argc) {
		int res;
		dataset_path=argv[arg_index++];
		TrainingSet *dataset=new TrainingSet(MAX_SAMPLES,MAX_CLASS_NUM);

		if (train) {
			/* the output .fit path comes from -D or from the next positional argument */
			if (!dataset_save_fit && arg_index < argc && argv[arg_index] && *(argv[arg_index])) dataset_save_fit = argv[arg_index];
			else if (!dataset_save_fit) showError (1,"No output file specified");
				
		// Make sure we can write to the output file before calculating anything.
			FILE *out_file;
			if (!(out_file=fopen(dataset_save_fit,"a"))) {// don't truncate if exists.
				showError (1,"Couldn't open '%s' for writing\n",dataset_save_fit);
				return(0);
			}
			fclose (out_file);
			res=dataset->LoadFromPath(dataset_path,rotations,tiles,multi_processor,large_set,colors,downsample,mean,stddev,&bounding_rect,overwrite,do_continuous);
			if (res < 1) {catError (dataset->error_message); showError(1,"Errors reading from '%s'\n",dataset_path);}
			res = dataset->SaveToFile (dataset_save_fit);
			if (res < 1) {catError (dataset->error_message); showError (1,"Could not save dataset to '%s'.\n",dataset_save_fit);}
			if (print_to_screen) printf ("Saved dataset to '%s'.\n",dataset_save_fit);
	
			// report any warnings
			catError (dataset->error_message);
			showError (0,NULL);

       } else if (test || classify) {
			int ignore_group=0;
			TrainingSet *testset=NULL;
		// Make sure we can write to the dataset output file if we got one before calculating anything.
			FILE *out_file;
			if (dataset_save_fit && !(out_file=fopen(dataset_save_fit,"a"))) {// don't truncate if exists.
				showError (1,"Couldn't open '%s' for writing\n",dataset_save_fit);
				return(0);
			} else if (dataset_save_fit) fclose (out_file);
			if (print_to_screen) printf ("Processing training set '%s'.\n",dataset_path);
			res=dataset->LoadFromPath(dataset_path,rotations,tiles,multi_processor,large_set,colors,downsample,mean,stddev,&bounding_rect,overwrite,do_continuous);
			if (res < 1) {catError (dataset->error_message); showError(1,"Errors reading from '%s'\n",dataset_path);}
			if (dataset_save_fit) {
				res = dataset->SaveToFile (dataset_save_fit);
				if (res < 1) {catError (dataset->error_message); showError (1,"Could not save dataset to '%s'.\n",dataset_save_fit);}
				if (print_to_screen) printf ("Saved dataset to '%s'.\n",dataset_save_fit);
			}
			// Store a copy of the warnings etc
			catError (dataset->error_message);


			/* check if there is a test set feature file (any positional argument that is not the .htm report) */
			if (arg_index<argc && strstr(argv[arg_index],".htm")==NULL) testset_path=argv[arg_index++];

			/* check if there is a report file name */
			if (arg_index<argc) {
				/* BUGFIX: bounded copy of the report file name */
				snprintf(report_file_buffer,sizeof(report_file_buffer),"%s",argv[arg_index]);
				report_file=report_file_buffer;
				report=1;   /* assume that the user wanted a report if a report file was specified */
			}
			
			// Load the test set if there is one
			if (testset_path) {
				if (testset_save_fit && !(out_file=fopen(testset_save_fit,"a"))) {// don't truncate if exists.
					showError (1,"Couldn't open '%s' for writing\n",testset_save_fit);
					return(0);
				} else if (testset_save_fit) fclose (out_file);
				if (print_to_screen) printf ("Processing test set '%s'.\n",testset_path);
				testset=new TrainingSet(MAX_SAMPLES,MAX_CLASS_NUM);
				res=testset->LoadFromPath(testset_path,rotations,tiles,multi_processor,large_set,colors,downsample,mean,stddev,&bounding_rect,overwrite,do_continuous);
				if (res < 1) {catError (testset->error_message); showError(1,"Errors reading from '%s'\n",testset_path);}
				if (testset_save_fit) {
					res = testset->SaveToFile (testset_save_fit);
					if (res < 1) {catError (testset->error_message); showError (1,"Could not save testset to '%s'.\n",testset_save_fit);}
					if (print_to_screen) printf ("Saved testset to '%s'.\n",testset_save_fit);
				}
				// Store a copy of the warnings etc
				catError (testset->error_message);
			} else if (classify) {
				showError (1,"The classify command must have a test set to work on.\n");
			}

			if (classify) {
				if (splits_num > 1) catError (0,"WARNING: -n option is ignored for 'classify'.  Results are based on a single test because there is no randomization.\n");
				splits_num = 1;
				random_splits = 0;
				// defaults are different (-r = 1.0 and random_splits = 0.  Set above, though -r can still be modified).
			}
			/* -A sets assess_features>0, which repeats the run once per ignored
			   feature group to measure each group's contribution */
			for (ignore_group=0;ignore_group<=assess_features;ignore_group++) {
				split_and_test(dataset, report_file, MAX_CLASS_NUM, method, tiles*tiles, split_ratio, balanced_splits, max_features, used_mrmr,splits_num,report,max_training_images,
					exact_training_images,max_test_images,phylib_path,phylip_algorithm,export_tsv,first_n,weight_file_buffer,weight_vector_action,N,
					testset,ignore_group,tile_areas,max_tile,image_similarities, random_splits);
			}
	
			// report any warnings
			catError (dataset->error_message);
			if (testset) catError (testset->error_message);
			showError (0,NULL);
       } // test or classify.
 
 
       
     } // no params left for dataset / test set.
     else ShowHelp();

     return(1);
}



