package edu.uta.cse6339.facetedinterface.classifier.src.facetedExplorationMain;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Random;

import weka.classifiers.Evaluation;
import weka.classifiers.bayes.NaiveBayesMultinomial;
import weka.classifiers.functions.SMO;
import weka.classifiers.functions.supportVector.PolyKernel;
import weka.core.Attribute;
import weka.core.DenseInstance;
import weka.core.Instances;
import weka.core.converters.ArffSaver;

public class Classifier
{
	ProcessNewsArticles pna = null;
	ClassifierHelper ch = null;
	
	final static int numOfClasses = 10;
	double threshold = 0.6;
	
	String trainingSetFolder = "/home/lakshmanas/workspace/cse6339-faceted-interface/data/input/training_data/";
	String testSetFolder = "/home/lakshmanas/workspace/cse6339-faceted-interface/data/input/test_data/";
	String newDataSetFolder = "/home/lakshmanas/workspace/cse6339-faceted-interface/data/input/new_data/";
	
	String labeledInputArticleFile = "/home/lakshmanas/workspace/cse6339-faceted-interface/data/input/article_label_training.csv";
	String labeledInputTestArticleFile = "/home/lakshmanas/workspace/cse6339-faceted-interface/data/input/article_label_testing.csv";
	
	String testarffFileName = "/home/lakshmanas/workspace/cse6339-faceted-interface/data/output/arff/testdata";// append .arff when you want to use it...";
	String unseenFilearffFileName = "/home/lakshmanas/workspace/cse6339-faceted-interface/data/output/arff/newdata";// append .arff when you want to use it...";
	String trainarffFileName = "/home/lakshmanas/workspace/cse6339-faceted-interface/data/output/arff/traindata"; // append .arff when you want to use it...";
	String summaryFile = "/home/lakshmanas/workspace/cse6339-faceted-interface/data/output/testsummary.txt";
	
	ArrayList<BayesianClassifierIns> allNBClassifiers;
	ArrayList<SMOClassifierIns> allSVMClassifiers;
	
	// If classifier used is NaiveBayes TRUE, if its SVM, FALSE
	boolean classifierUsedNB = true;
	
	public void beginClassification()
	{
		pna = new ProcessNewsArticles();
		ch = new ClassifierHelper();
		trainClassifier();
		
		//testClassifierNew();
		//makePredictionNew();
		makePrediction();
	}
	
	private void trainClassifier()
	{
		/*
		 * STEPS TO DO:
		 * 1) process all articles, get the top-K words from each article and the union of all these words, will form the entire feature set.
		 * 2) For each article of a type, create a feature vector, putting in the token-score only in the respective slots of d feature vector.
		 * 3) In the above step, you would have put the "type" as the last column value (after the top-k tokens are put up)
		 * 4) Build the classifier using this training Instances..
		 * 
		 * 5) Now, use the built classifier and run it on the "TEST" data. Test data will also have its feature vectors build exactly like
		 * the above..
		 * 6) Estimate the accuracy and get other statistics after evaluating the classifier on the test data..
		 * 
		 * 7) The same classifier can now be used on unseen data to predict labels.. Create a feature vector for each such new article the
		 * same way as above EXCEPTING the last value (which is the type/class) is put up as a "?".
		 * 8) When such instances are run with the classifier, you can get a label(s) associated with the article..
		 */
		// get the top-K words of each article..
		dataFeature df = pna.processArticles(trainingSetFolder, true);
		// set the attributes and class labels in weka DS..
		// classArticles: key=docId, value=set of classes associated with this doc...
		HashMap<Integer, HashSet<Integer>> classArticles = ch.populateClassedArticles(labeledInputArticleFile, df.invertedDocIdMapper);
		
		if(classifierUsedNB)
			allNBClassifiers = new ArrayList<BayesianClassifierIns>();
		else
			allSVMClassifiers = new ArrayList<SMOClassifierIns>();
		// create feature vectors for each article in the training set...
		for(int i=1; i<=numOfClasses; i++)
		{
			// create the instance with all types associated with the dataset..
			Instances trainIns = null;
			trainIns = createClassifierAttributeList("facetedExplorationTrain");
			if(trainIns == null)
			{
				System.out.println("ERROR TRAINING: instance was not initialized for class " + i + " . Gotta debug!");
				continue;
			}
			generateARFFNew(classArticles, df.featureVector, i, trainIns, df.docIdMapper);
			if(classifierUsedNB)
			{
				
				NaiveBayesMultinomial classifier = new NaiveBayesMultinomial();
				try 
		        {
		        	// begin training of classifier..
					// if you don't want to write to an arff, directly use "trainIns" instead of writing an arff, and then readin it back!
					/*String arffFn = trainarffFileName + i + ".arff";
					writeARFFNew(arffFn, trainIns);
					Instances trainData = null;
		    		BufferedReader reader = new BufferedReader(new FileReader(arffFn));
		            trainData = new Instances(reader);
		            reader.close();*/
					trainIns.setClassIndex(trainIns.numAttributes() - 1);
		            System.out.println("Training Started!");
		            classifier.buildClassifier(trainIns);
		            Evaluation eval = new Evaluation(trainIns);
		            eval.crossValidateModel(classifier, trainIns, 10, new Random(1));
		            System.out.println(eval.toSummaryString("\nResults\n======\n", false));
		            System.out.println("Training Completed Successfully!");
		            /*BufferedWriter out = new BufferedWriter(new FileWriter(summaryFile));
			        out.write(eval.toSummaryString("\nResults\n======\n", false));
			        out.write("Feature Vector Size:" + (trainData.numAttributes() - 1) + "\n");
			        out.close();*/
				} 
		        catch (Exception e)
		        {
					// TODO Auto-generated catch block
					e.printStackTrace();
				}
		        BayesianClassifierIns ci = new BayesianClassifierIns();
	        	ci.classifier = classifier;
	        	ci.instance = trainIns;
				allNBClassifiers.add(ci);
	        	/*System.out.println("*****************************************");
	        	System.out.println("Number of classes = " + trainIns.numClasses());
				System.out.println(classifier.toString());
				System.out.println("*****************************************");*/
			}
			else
			{
				SMO classifier = new SMO();
				final double complexityParameter = .8;
			    final double toleranceParameter = 0.001;
			    final double exponentPolyKernerl = 0.6;
			    final double smoEpsilon = 0.00001;
			    try
			    {
			    	String[] options = weka.core.Utils.splitOptions("-N 2");
					classifier.setOptions(options);
		            //PolyNomial Kernel
		            PolyKernel polyK = new PolyKernel();
		            polyK.setUseLowerOrder(true);
		            polyK.setExponent(exponentPolyKernerl);
		            classifier.setC(complexityParameter);
		            classifier.setKernel(polyK);
		            classifier.setEpsilon(smoEpsilon);
		            classifier.setToleranceParameter(toleranceParameter);
		            
		         // begin training of classifier..
					// if you don't want to write to an arff, directly use "trainIns" instead of writing an arff, and then readin it back!
					/*String arffFn = trainarffFileName + i + ".arff";
					writeARFFNew(arffFn, trainIns);
					Instances trainData = null;
		    		BufferedReader reader = new BufferedReader(new FileReader(arffFn));
		            trainData = new Instances(reader);
		            reader.close();*/
		    		//Instances trainData = trainIns;
		            trainIns.setClassIndex(trainIns.numAttributes() - 1);
		            System.out.println("Training Started!");
		            classifier.buildClassifier(trainIns);
		            Evaluation eval = new Evaluation(trainIns);
		            eval.crossValidateModel(classifier, trainIns, 10, new Random(1));
		            System.out.println(eval.toSummaryString("\nResults\n======\n", false));
		            System.out.println("Training Completed Successfully!");
		            /*BufferedWriter out = new BufferedWriter(new FileWriter(summaryFile));
			        out.write(eval.toSummaryString("\nResults\n======\n", false));
			        out.write("Feature Vector Size:" + (trainData.numAttributes() - 1) + "\n");
			        out.close();*/
			    }
			    catch(Exception e)
			    {
			    	e.printStackTrace();
			    }
			    SMOClassifierIns ci = new SMOClassifierIns();
	        	ci.classifier = classifier;
		        ci.instance = trainIns;
				allSVMClassifiers.add(ci);
			}
	        
			/*
			 * REMEMBER: The type associated with allClassifiers[i] is actually "i+1". I had started naming types from 1 onwards, but the
			 * index in an arraylist starts from 0. So make sure when you are trying to display a type, taking it out of this arraylist,
			 * add 1 to the index..
			 */
		}
        df = null;
        classArticles = null;
	}
	
	private void testClassifierNew()
	{
		// get the feature vector of each article..
		dataFeature df = pna.processArticles(testSetFolder, false);
		pna.printKeyWordOutputToFile(df.docIdMapper);
		// set the attributes and class labels in weka DS..
		// classArticles: key=docId, value=set of classes associated with this doc...
		HashMap<Integer, HashSet<Integer>> classArticles = ch.populateClassedArticles(labeledInputTestArticleFile, df.invertedDocIdMapper);
		// create feature vectors for each article in the training set...
		Instances oneIns = createClassifierAttributeList("facetedExplorationTest");
		if(oneIns == null)
		{
			System.out.println("ERROR TESTING: instance was not initialized for UNSEEN DATA. Gotta debug!");
		}
		for(int i=1; i<=numOfClasses; i++)
		{
			// create the instance with all types associated with the dataset..
			Instances trainIns = new Instances(oneIns);
			generateARFFNew(classArticles, df.featureVector, i, trainIns, df.docIdMapper);
			try 
	        {
	        	// begin testing of classifier..
				// if you don't want to write to an arff, directly use "trainIns" instead of writing an arff, and then readin it back!
				String arffFn = testarffFileName + i + ".arff";
				writeARFFNew(arffFn, trainIns);
				/*Instances testInstances = null;
	    		BufferedReader reader = new BufferedReader(new FileReader(arffFn));
	    		testInstances = new Instances(reader);
	    		reader.close();*/
				trainIns.setClassIndex(trainIns.numAttributes() - 1);
	            System.out.println("Testing Started!");
	            
	            if(classifierUsedNB)
	            {
	            	NaiveBayesMultinomial classifier = allNBClassifiers.get(i-1).classifier;
	            	Instances trainData = allNBClassifiers.get(i-1).instance;
	            	Evaluation eval = new Evaluation(trainData);
		            eval.evaluateModel(classifier, trainIns);
		            System.out.println(eval.toSummaryString("\nTESTING Results\n======\n", false));
		            System.out.println("Testing Completed Successfully!");
			        BufferedWriter out = new BufferedWriter(new FileWriter(summaryFile));
			        out.write(eval.toSummaryString("\nResults\n======\n", false));
			        out.write("Feature Vector Size:" + (trainData.numAttributes() - 1) + "\n");
			        out.close();
	            }
	            else
	            {
	            	SMO classifier = allSVMClassifiers.get(i-1).classifier;
	            	Instances trainData = allSVMClassifiers.get(i-1).instance;
	            	Evaluation eval = new Evaluation(trainData);
		            eval.evaluateModel(classifier, trainIns);
		            System.out.println(eval.toSummaryString("\nTESTING Results\n======\n", false));
		            System.out.println("Testing Completed Successfully!");
			        BufferedWriter out = new BufferedWriter(new FileWriter(summaryFile));
			        out.write(eval.toSummaryString("\nResults\n======\n", false));
			        out.write("Feature Vector Size:" + (trainData.numAttributes() - 1) + "\n");
			        out.close();
	            }
	        } 
	        catch (Exception e)
	        {
				// TODO Auto-generated catch block
				e.printStackTrace();
			}
		}
        df = null;
        classArticles = null;
	}
	
	/*private void makePredictionNew()
	{
		try
		{
			NaiveBayesMultinomial classifier = allNBClassifiers.get(0).classifier;
			Instances unlabeled = new Instances(new BufferedReader(new FileReader("/home/lakshmanas/workspace/cse6339-faceted-interface/data/output/arff/modtestdata.arff")));
			unlabeled.setClassIndex(unlabeled.numAttributes() - 1);
			Instances labeled = new Instances(unlabeled);
			System.out.println(unlabeled.numClasses());
			for (int i = 0; i < unlabeled.numInstances(); i++) {
				   double clsLabel = classifier.classifyInstance(unlabeled.instance(i));
				   System.out.println("Doc " + (int)unlabeled.instance(i).value(unlabeled.attribute("article___Link")) + " : " + (int)clsLabel);
				   labeled.instance(i).setClassValue(clsLabel);
				 }
				 // save labeled data
				 BufferedWriter writer = new BufferedWriter(
				                           new FileWriter("/home/lakshmanas/workspace/cse6339-faceted-interface/data/output/arff/modtestdataLabeled.arff"));
				 writer.write(labeled.toString());
				 writer.newLine();
				 writer.flush();
				 writer.close();
		}
		catch(Exception e)
		{
			e.printStackTrace();
		}
	}*/
	
	private void makePrediction()
	{
		// get the feature vector of each article..
		dataFeature df = pna.processArticles(newDataSetFolder, false);
		pna.printKeyWordOutputToFile(df.docIdMapper);
		// create feature vectors..
		Instances unseenIns = null;
		unseenIns = createClassifierAttributeList("facetedExplorationTest");
		if(unseenIns == null)
		{
			System.out.println("ERROR PREDICTION TESTING: instance was not initialized for class. Gotta debug!");
		}
		generateARFFUnseenData(df.featureVector, unseenIns);
		// The output to be written to a file..
		// key=LabelID, Value=DocumentID... both of these must be mapped to their corresponding strings and then written to a file...
		HashMap<Integer, ArrayList<Integer>> labelDocsInvertedIndex = new HashMap<Integer, ArrayList<Integer>>();
		try 
		{
			// write to ARFF file.. trainingInstances now has what is to be written to an arff file..
			String arffFileName = unseenFilearffFileName + ".arff";
			writeARFFNew(arffFileName, unseenIns);
			// use the classifier obtained to verify the training model..
/*			BufferedReader reader = new BufferedReader(new FileReader(arffFileName));
	        Instances newInstances = new Instances(reader);
	        reader.close();*/
			unseenIns.setClassIndex(unseenIns.numAttributes() - 1);
			for(int i=0; i<unseenIns.size(); i++)
			{
				//System.out.println("Classifying instance " + i + " :");
				for(int j=1; j<=numOfClasses; j++)
				{
					//System.out.println("\tType/Class: " + j);
					double clsLabel;
					if(classifierUsedNB)
					{
						NaiveBayesMultinomial classifier = new NaiveBayesMultinomial();
						classifier = allNBClassifiers.get(j-1).classifier;
						int docid = (int)unseenIns.instance(i).value(unseenIns.attribute("article___Link"));
						double[] labelDistr = classifier.distributionForInstance(unseenIns.instance(i));
						if(labelDistr[0] > threshold)
						{
							// need to add only if its true (1)...
							if(labelDocsInvertedIndex.containsKey(j))
							{
								ArrayList<Integer> docList = labelDocsInvertedIndex.get(j);
								docList.add(docid);
							}
							else
							{
								ArrayList<Integer> docList = new ArrayList<Integer>();
								docList.add(docid);
								labelDocsInvertedIndex.put(j, docList);
							}
						}
						/*System.out.println("\t\tTrue: " + labelDistr[0]);
						System.out.println("\t\tFalse: " + labelDistr[1]);*/
						
						/*System.out.println("Testing doc id = " + docid);
						clsLabel = classifier.classifyInstance(unseenIns.instance(i));
						System.out.println(clsLabel);*/
					}
					else
					{
						SMO classifier = allSVMClassifiers.get(j-1).classifier;
						clsLabel = classifier.classifyInstance(unseenIns.instance(i));
						int docid = (int)unseenIns.instance(i).value(unseenIns.attribute("article___Link"));
						double[] labelDistr = classifier.distributionForInstance(unseenIns.instance(i));
						System.out.println("TRUE : " + labelDistr[0]);
						if(labelDistr[0] > threshold)
						{
							// need to add only if its true (1)...
							if(labelDocsInvertedIndex.containsKey(j))
							{
								ArrayList<Integer> docList = labelDocsInvertedIndex.get(j);
								docList.add(docid);
							}
							else
							{
								ArrayList<Integer> docList = new ArrayList<Integer>();
								docList.add(docid);
								labelDocsInvertedIndex.put(j, docList);
							}
						}
						/*System.out.println("\tPredicted label: " + clsLabel);
						System.out.println("\t\tTrue: " + labelDistr[0]);
						System.out.println("\t\tFalse: " + labelDistr[1]);*/
					}
				}
			}
		}
		catch (Exception e)
		{
			// TODO Auto-generated catch block
			e.printStackTrace();
		}
		pna.printLabelOutputFile(df.docIdMapper, labelDocsInvertedIndex);
	}
	
	private void generateARFFUnseenData(HashMap<Integer, HashMap<String, Double>> featureVector, Instances trainIns)
	{
		String classType = "?";
		// IMPORTANT: It is "1+1" (an extra 1) because I am adding a new attribute called "link" which is NOT part of allTokensInFeature
		// So the total number of attributes is the attrs in the feature vector + articlLink + type (the last 1)...
		
		Iterator<Integer> iter = featureVector.keySet().iterator();
		while(iter.hasNext())
		{
			double[] attValues = new double[ProcessNewsArticles.allTokensInFeature.size() + 2];
			DenseInstance instance = new DenseInstance(1, attValues);
		    instance.setDataset(trainIns);
			int docId = iter.next();
			HashMap<String, Double> topKtokens = featureVector.get(docId);
			Iterator<String> tokenIter = topKtokens.keySet().iterator();
			while(tokenIter.hasNext())
			{
				String token = tokenIter.next();
				if(ProcessNewsArticles.allTokensInFeature.contains(token))
				{
					Attribute messageAtt = trainIns.attribute(token);
	                if (messageAtt != null) 
	                {
	                    double c = topKtokens.get(token);
	                    instance.setValue(messageAtt, c);
	                }
				}
			}
			Attribute messageAtt = trainIns.attribute("article___Link");
            if (messageAtt != null) 
            {
            	double c = docId + 0.0;
                instance.setValue(messageAtt, c);
            }
			instance.setValue(trainIns.attribute(ProcessNewsArticles.allTokensInFeature.size()+1), classType);
			trainIns.add(instance);
		}
	}
	
	private void writeARFFNew(String fileName, Instances ins)
	{
		try 
		{
            ArffSaver saver = new ArffSaver();
            saver.setInstances(ins);
            saver.setFile(new File(fileName));
            saver.writeBatch();

        } 
		catch (IOException ex)
		{
            ex.printStackTrace();
        }
	}
	
	private void generateARFFNew(HashMap<Integer, HashSet<Integer>> classArticles, 
			HashMap<Integer, HashMap<String, Double>> featureVector, 
			int type, Instances ins, HashMap<Integer, String> docIdMapper)
	{
		// classArticles: key = docId, value = classes associated with this doc.
		Iterator<Integer> iter = classArticles.keySet().iterator();
		while(iter.hasNext())
		{
			String classType = "";
			int docid = iter.next();
			HashSet<Integer> classList = classArticles.get(docid);
			if(classList.contains(type))
			{
				classType += 1;
			}
			else
			{
				classType += 0;
			}
			if(featureVector.containsKey(docid))
			{
				// IMPORTANT: It is "1+1" (an extra 1) because I am adding a new attribute called "link" which is NOT part of allTokensInFeature
				// So the total number of attributes is the attrs in the feature vector + articleLink + type (the last 1)...
				double[] attValues = new double[ProcessNewsArticles.allTokensInFeature.size() + 1 + 1];
				DenseInstance instance = new DenseInstance(1, attValues);
			    instance.setDataset(ins);
				HashMap<String, Double> topKtokens = featureVector.get(docid);
				Iterator<String> tokenIter = topKtokens.keySet().iterator();
				while(tokenIter.hasNext())
				{
					String token = tokenIter.next();
					if(ProcessNewsArticles.allTokensInFeature.contains(token))
					{
						Attribute messageAtt = ins.attribute(token);
		                if (messageAtt != null) 
		                {
		                    double c = topKtokens.get(token);
		                    instance.setValue(messageAtt, c);
		                }
					}
				}
				Attribute messageAtt = ins.attribute("article___Link");
                if (messageAtt != null) 
                {
                	double c = docid + 0.0;
                    instance.setValue(messageAtt, c);
                }
				instance.setValue(ins.attribute(ProcessNewsArticles.allTokensInFeature.size()+1), classType);
		        ins.add(instance);
			}
		}
	}
	
	private Instances createClassifierAttributeList(String relationName)
	{
		Instances ins = null;
		ArrayList<Attribute> atts = new ArrayList<Attribute>();
		Iterator<String> iter = ProcessNewsArticles.allTokensInFeature.iterator();
		while(iter.hasNext())
		{
			String attribute = iter.next();
			atts.add(new Attribute(attribute));
		}
		String linkAttr = "article___Link";
		atts.add(new Attribute(linkAttr));
		ArrayList<String> attsTypes = new ArrayList<String>();
		attsTypes.add("1");
		attsTypes.add("0");
		attsTypes.add("?");
		atts.add(new Attribute("typeName", attsTypes));
		ins = new Instances(relationName, atts, 0);
		return ins;
	}
}

// Pair of a trained multinomial Naive Bayes model and the training Instances it
// was built from; the Instances are kept so a weka Evaluation can later be
// seeded with the training header (see testClassifierNew).
class BayesianClassifierIns
{
	NaiveBayesMultinomial classifier;
	Instances instance;
}
// Pair of a trained SMO (SVM) model and the training Instances it was built
// from; the Instances are kept so a weka Evaluation can later be seeded with
// the training header (see testClassifierNew).
class SMOClassifierIns
{
	SMO classifier;
	Instances instance;
}