package fr.univtoulouse2.m2ice;
/*
Program that implements Task Decomposition.

Reference: The Knowledge Flow Graph "KFGfortask2".

Comments: 1) This implements Clustering with the "Runs" as the axes. Each Example has a value 0(wrong classification) or 1(correct classification) for each run.
	  2) The options are discussed below:
		-h Displays the option set.
		-i Input File in Arff. 
		-o Output File (preferably CSV)
		-t Intermediate Output File. This is the classification of each example over all the runs. CSV Format
		-s Number of Folds in The Stratified Remove Folds (SubSampler)
		-r Number of Crossvalidation Folds in the Remove Misclassified Filter.
		-n Number of total Runs
		-m Number of Iterations within the FilteredClassifier (Setting this to Zero Ensures a completely cleansed dataset is obtained).
		-k Number of Clusters.
	  3) To compile:

		Include weka.jar in your project directory (or alternatively set the classpath to point to the directory in which weka.jar is present)
		
		javac -classpath weka.jar:. TaskDecomposition.java

	  4) A test Run with the iris(mutated) dataset that worked used the following parameters:
	
		java -cp weka.jar:. TaskDecomposition -i iris5.arff -o iris5out.csv -t iris5temp.csv -s 20 -r 10 -n 200 -m 0 -k 2
	

Thank You Alex for the Example program. That helped a lot.

-Sanketh

November 2004

*/
 
// Weka Classes
import weka.core.*;
import weka.core.converters.ArffLoader;
import weka.core.converters.CSVLoader;
import weka.classifiers.meta.*;
import weka.clusterers.*;
import weka.filters.supervised.instance.StratifiedRemoveFolds;
import weka.filters.unsupervised.attribute.Add;
import weka.core.Utils;
import weka.classifiers.trees.J48;
import weka.classifiers.meta.FilteredClassifier;
import weka.classifiers.bayes.NaiveBayes;
import weka.filters.unsupervised.instance.RemoveMisclassified;
import weka.clusterers.SimpleKMeans;
import weka.clusterers.EM;
//Java Classes
import java.io.*;
import java.util.*;
import java.lang.*;
import java.lang.Math;
import java.awt.Robot; // Don't ask why!

public class TaskDecomposition implements Serializable 
{

// This class might as well have been a structure.
public static class OneRun implements Serializable
{
	public StratifiedRemoveFolds SRF = new StratifiedRemoveFolds();
	public Instances dataSet;
	public Instances sampleSet;
	public Instances testSet;
	
	//The Meta parameters
	// Vary the classifiers here. This will be useful in evaluating the effect of the classifier on Task Decomposition.

	public RemoveMisclassified RMF = new RemoveMisclassified();
	
	// Naive Bayes seems to work better in most cases.

 	//public J48 RMF_classifier = new J48();
	public NaiveBayes RMF_classifier =new NaiveBayes();
	//public J48 FC_classifier = new J48();
	public NaiveBayes FC_classifier = new NaiveBayes();
	public FilteredClassifier FC = new FilteredClassifier();

	
	public void OneRun()
	{;
	}	
}



/**
 * Entry point. Runs the task-decomposition pipeline:
 *   1. Parse the command-line options (see -h for the list).
 *   2. Measure baseline J48 accuracy on the raw data set.
 *   3. For each of the -n runs: draw a stratified subsample, train a
 *      FilteredClassifier (NaiveBayes + RemoveMisclassified) on it, and
 *      record per instance whether the full data set is classified
 *      correctly (the 0/1 "runs matrix").
 *   4. Write the runs matrix to the -t CSV file.
 *   5. Cluster the runs matrix with EM into -k clusters (task ids),
 *      write "taskId,instanceIndex" pairs to the -o file, and append the
 *      task id as a new first attribute of the original data.
 *   6. Measure J48 accuracy again on the augmented data.
 *
 * @param args command-line options; parsed with weka.core.Utils
 * @throws Exception never propagates in practice — all failures are
 *         caught and printed below
 */
public static void main(String[] args)
throws Exception 
{
 	try{
	
	if(Utils.getFlag('h',args)||args.length==0)
		{
			System.out.println("\n\nThis Program Uses the following Options:\n\n-h Displays the option set:\n-i Input File in Arff.\n-o Output File (preferably CSV)\n-t Intermediate Output File. This is the classification of each example over all the runs. CSV Format.\n-s Number of Folds in The Stratified Remove Folds (SubSampler)\n-r Number of Crossvalidation Folds in the Remove Misclassified Filter.\n-n Number of total Runs\n-m Number of Iterations within the FilteredClassifier(Setting this to Zero Ensures a completely cleansed dataset is obtained).\n-k Number of Clusters.\n\n");
		System.exit(0);
		}
		
		// Read the Input and Output File names.
		String outFile = Utils.getOption('o',args);
		String inFile = Utils.getOption('i',args);
		
		File dataSet = new File(inFile);
		// Number of folds for the StratifiedRemoveFolds subsampler.
		int numfolds = Integer.parseInt(Utils.getOption('s',args));
		
		// Number of iterations of the entire process.
		int num_iter = Integer.parseInt(Utils.getOption('n',args));
		
		// Number of iterations within the meta classifier (FilteredClassifier).
		int num_meta = Integer.parseInt(Utils.getOption('m',args));

		// Number of cross-validation folds in the RemoveMisclassified filter.
		int num_folds = Integer.parseInt(Utils.getOption('r',args));

		ArffLoader ArffFile = new ArffLoader();
		ArffFile.setFile(dataSet);

		// BUG FIX: load the ARFF file exactly once. The original called
		// ArffFile.getDataSet() five separate times, re-reading the file on
		// every use (and failing outright on newer Weka versions). Per-run
		// copies are made with the Instances copy constructor instead.
		Instances fullData = ArffFile.getDataSet();
		fullData.setClassIndex(fullData.numAttributes()-1);

		// results[i][r] is 1 when instance i was classified correctly in run r.
		int [][] results = new int[fullData.numInstances()][num_iter];
		
		/* To measure the improvement in performance of the classifier we first measure the pre-processing performance.*/
		J48 beforeProcess = new J48();
		beforeProcess.buildClassifier(fullData);

		double correct = 0.0;
		double wrong = 0.0;
		for(int ni = 0; ni < fullData.numInstances(); ni++)
		{
			if(beforeProcess.classifyInstance(fullData.instance(ni))==fullData.instance(ni).classValue())
				correct++;
			else
				wrong++;
		}

		System.out.println("The Classification Accuracy before processing: "+(correct*100.0/(correct+wrong))+"%");

		/* Result output above */

		// One RNG for the whole program.
		// BUG FIX: the original derived the subsampler seed from
		// 1000*nextFloat()/nextFloat() — which divides by zero whenever the
		// second draw is exactly 0.0f — and used a java.awt.Robot delay to
		// vary seeds between runs, which crashes in headless environments.
		// A single Random drawn from once per run needs neither trick.
		Random rng = new Random();

		for(int iter=0;iter<num_iter;iter++)
		{
			// Object that holds the state of one iteration.
			OneRun run = new OneRun();
			
			// Train on a subsample; test on a full copy of the data.
			run.dataSet = new Instances(fullData);
			run.dataSet.setClassIndex(run.dataSet.numAttributes()-1);
			run.testSet = new Instances(fullData);
			run.testSet.setClassIndex(run.testSet.numAttributes()-1);
			
			// Subsample using StratifiedRemoveFolds with a fresh random seed.
			run.SRF.setInputFormat(run.dataSet);
			run.SRF.setNumFolds(numfolds);
			run.SRF.setSeed(rng.nextLong());
			run.sampleSet = StratifiedRemoveFolds.useFilter(run.dataSet,run.SRF);
			run.sampleSet.setClassIndex(run.sampleSet.numAttributes()-1);
			
			// The meta classifier FilteredClassifier applies the
			// RemoveMisclassified filter, so the model is trained on a
			// cleansed version of the subsample.
			run.RMF.setClassifier(run.RMF_classifier);
			run.RMF.setMaxIterations(num_meta);
			run.RMF.setNumFolds(num_folds);
			run.FC.setClassifier(run.FC_classifier);
			run.FC.setFilter(run.RMF);

			// Build the classifier.
			run.FC.buildClassifier(run.sampleSet);
			
			// Record, per instance, whether the prediction matches the class.
			for(int test_index=0;test_index<run.testSet.numInstances();test_index++)
			{
				if(run.FC.classifyInstance(run.testSet.instance(test_index))==run.testSet.instance(test_index).classValue())
				{
					results[test_index][iter]++;
				}
			}
		}

		// Write the runs matrix as a CSV file for clustering.
		String tempFile = Utils.getOption('t',args);
		PrintStream CSV = new PrintStream(new FileOutputStream(tempFile));
		try
		{
			// Header row: run0,run1,...,run(n-1).
			for(int i=0;i<num_iter-1;i++)
			{
				CSV.print("run"+i+",");
			}
			CSV.println("run"+(num_iter-1));

			// One row per instance.
			for(int j=0;j<fullData.numInstances();j++)
			{
				for(int i1=0;i1<num_iter-1;i1++)
				{
					CSV.print(results[j][i1]+",");
				}
				CSV.println(results[j][num_iter-1]);
			}
		}
		finally
		{
			// BUG FIX: close the stream even when writing fails.
			CSV.close();
		}

		// Finished writing CSV file; re-open it as a data set for clustering.
		CSVLoader CSV_cluster = new CSVLoader();
		CSV_cluster.setFile(new File(tempFile));
		Instances clusterData = CSV_cluster.getDataSet();
		
		// Begin clustering.
		int NumberofClusters = Integer.parseInt(Utils.getOption('k',args));
		
		// Expectation Maximization. (SimpleKMeans, imported above, can be
		// substituted here to compare clusterers.)
		// BUG FIX: the original seeded the clusterer with
		// Math.round(nextFloat()), which only ever yields 0 or 1.
		EM em = new EM();
		em.setSeed(rng.nextInt());
		em.setMinStdDev(0.01);
		em.setNumClusters(NumberofClusters);
		em.setMaxIterations(100);

		System.out.println("EM CLUSTERING BEGINS");
		em.buildClusterer(clusterData);
		System.out.println("EM CLUSTERING ENDS");

		PrintStream OutFile = new PrintStream(new FileOutputStream(outFile));
		try
		{
			// The Add filter appends the task id as a new first attribute.
			Instances newdata = new Instances(fullData);
			Add addFilter = new Add();
			addFilter.setAttributeName("task");
			addFilter.setAttributeIndex("first");
			addFilter.setInputFormat(newdata);
			Instances newerdata = Add.useFilter(newdata,addFilter);

			// Output "taskId,instanceIndex" pairs and store the task id on
			// each instance of the augmented data set.
			for(int f=0;f<clusterData.numInstances();f++)
			{
				int task = em.clusterInstance(clusterData.instance(f));
				OutFile.println(task+","+f);
				newerdata.instance(f).setValue(0,task);
			}

			// Check accuracy improvements after task decomposition.
			J48 afterProcess = new J48();
			System.out.println(newerdata);
			newerdata.setClassIndex(newerdata.numAttributes()-1);
			afterProcess.buildClassifier(newerdata);

			correct = 0.0;
			wrong = 0.0;
			for(int ni = 0; ni < newerdata.numInstances(); ni++)
			{
				if(afterProcess.classifyInstance(newerdata.instance(ni))==newerdata.instance(ni).classValue())
					correct++;
				else
					wrong++;
			}

			// BUG FIX: corrected the "Accuaracy" typo in the output message.
			System.out.println("The Classification Accuracy after processing is: "+(correct*100.0/(correct+wrong))+"%");
		}
		finally
		{
			OutFile.close();
		}

	}
	catch(Exception e)
	{
		e.printStackTrace();
	}
}

}


/*	*	*	*	*/

/*	Sanketh V Shetty	*/

/*	*	*	*	*/	
	
	








