package mth.weka.cltree;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.Reader;
import java.util.Arrays;

import weka.core.Instances;

public class Cltree extends weka.clusterers.AbstractClusterer {

	/**
	 * CLTree is thought as an implementation of the proposed CLuster Tree Algorithm of Liu et al. 2000
	 * 
	 * this software comes without any warranty for functionality, usability, security and correctness of output
	 * it is neither ready for purpose nor a finished project
	 * 
	 * Version 20150226-01
	 * possible parameters like splitmeasure and pruning parameters are hard-coded due to complexity issues
	 * "quick and dirty"-Version
	 * new design without classes: 
	 * 		LookAheadSplit - a new method ClusterTreeSplit::lookAhead() is used for this purpose now
	 * 		ClusterTreeResult - obsolete. The Node class and the first Node-Object root are used to represent the Tree 
	 * AttrSplit.java should be able to choose if a split should be LOE or GOE as an attribute value (left vs. right split next to object) 
	 * TODO: nPoints change type from int to double?
	 */
	
	private static final long serialVersionUID = -1445497811684801724L;
	
//	private static Instances data;
	// Root of the cluster tree; created lazily in buildClusterer().
	private Node root;
	
	// Name of the split criterion handed to ClusterTreeSplit (e.g. "lookahead-cluster").
	protected String splitcriterium;
	// Whether ClusterTreeSplit should restrict itself to binary splits.
	protected boolean binarySplit;
	// Maximum depth of the tree (recursion stops at this depth).
	protected int maxDepth;
	// Minimum number of instances a subset must contain to be split further.
	protected int minNumObj;
	protected double minAcceptedImprovement; //minimal improvement of information gain a split has to generate,
	//otherwise the algorithm will stop
	
	//parameters for pruning (read by the external pruning step; not used while growing the tree)
	private boolean prune;
	private double min_rd;
	private double min_y;

	/**
	 * Parses command-line style options into the clusterer's fields.
	 * Options come in flag/value pairs, e.g. {@code -md 10 -no 1}.
	 * Unknown flags are ignored; a trailing flag without a value is skipped.
	 *
	 * @param args flag/value pairs: -Sc split criterion, -rd min relative density,
	 *             -my min y, -ip min accepted improvement, -md max depth,
	 *             -no min number of objects, -bs binary split, -pr prune
	 */
	public Cltree(String[] args) {
		// a advances by two: each flag consumes exactly one value.
		// The a+1 bound guards against reading past the end on a trailing flag.
		for (int a = 0; a + 1 < args.length; a += 2) {
			switch (args[a]) {
				case "-Sc": splitcriterium = args[a + 1]; break;
				case "-rd": min_rd = Double.parseDouble(args[a + 1]); break;
				case "-my": min_y = Double.parseDouble(args[a + 1]); break;
				case "-ip": minAcceptedImprovement = Double.parseDouble(args[a + 1]); break;
				case "-md": maxDepth = Integer.parseInt(args[a + 1]); break;
				case "-no": minNumObj = Integer.parseInt(args[a + 1]); break;
				case "-bs": binarySplit = Boolean.parseBoolean(args[a + 1]); break;
				case "-pr": prune = Boolean.parseBoolean(args[a + 1]); break;
				default: break; // unknown flag: ignore and keep scanning
			}
		}
	}
	
	/**
	 * Returns a new array containing all elements of {@code array} followed by {@code value}.
	 * The input array is not modified.
	 *
	 * @param array source array (may be empty, must not be null)
	 * @param value element to append
	 * @return a copy of {@code array} one element longer, ending with {@code value}
	 */
	public static double[] addToDoubleArray(double[] array, double value) {
		double[] result = Arrays.copyOf(array, array.length + 1);
		result[array.length] = value;
		return result;
	}
	
	/**
	 * Entry point for growing the tree; starts the recursion at depth 0.
	 *
	 * @param newData instances to partition
	 * @param parent  node to split, or null for the root
	 */
	private void buildCLTree(Instances newData, Node parent) {
		buildCLTree(newData, parent, 0);
	}
	
	/**
	 * Recursively grows the cluster tree: finds the best split for {@code parent},
	 * partitions {@code newData} on it and recurses into both children.
	 * Recursion stops when the split's information gain falls below
	 * {@code minAcceptedImprovement}, when {@code currentDepth} reaches
	 * {@code maxDepth}, or when a subset has fewer than {@code minNumObj} instances.
	 *
	 * @param newData      instances belonging to {@code parent}
	 * @param parent       node to split; null means the root node
	 * @param currentDepth depth of {@code parent} in the tree (root = 0)
	 */
	private void buildCLTree(Instances newData, Node parent, int currentDepth) {
		//if parent == null it is the root-node
		ClusterTreeSplit cts = new ClusterTreeSplit(splitcriterium, binarySplit);

		if (parent == null) {
			parent = root; // Node is passed by reference, so the tree is built in place
		}
		cts.calculateBestSplit(newData, parent);

		// stop criteria: split improves too little, or the maximum tree depth is reached
		if (cts.getInfoGain() < minAcceptedImprovement || currentDepth >= maxDepth) {
			return;
		}
		parent.setSplitAttrib(cts.getSplitAttribIndex());
		parent.setSplitValue(cts.getSplitValue());
		
		parent.addChildNodes(new Node(), new Node());
		parent.getChildNodes()[0].setParent(parent);
		parent.getChildNodes()[1].setParent(parent);
		
		// Partition by copying the full set twice and deleting the instances that
		// belong to the other side (to copy data is more easy than to build up a new structure).
		Instances subSet1 = new Instances(newData);
		Instances subSet2 = new Instances(newData);

		// sNcount tracks how many instances were already deleted from each copy,
		// so a-sNcount is the current index of instance a inside that copy.
		int s1count = 0;
		int s2count = 0;
		for (int a = 0; a < newData.numInstances(); a++) {
			if (newData.instance(a).value(cts.getSplitAttribIndex()) <= cts.getSplitValue()) {
				subSet2.delete(a - s2count); // instance goes left: drop it from the right subset
				s2count++;
			} else {
				subSet1.delete(a - s1count); // instance goes right: drop it from the left subset
				s1count++;
			}
		}
		
		//removing empty objects from dataset
		subSet1.compactify();
		subSet2.compactify();
		
//		System.out.println("Split at:"+cts.getSplitPos()+" ("+cts.getSplitValue()+")"); //Output verification tests
		if (subSet1.numInstances() != 0) {
			System.out.println("links:"+ subSet1.toString().split("@data\n")[1].replace('\n', ','));
		}
		if (subSet2.numInstances() != 0) {
			System.out.println("rechts:"+ subSet2.toString().split("@data\n")[1].replace('\n', ',')+"\n");
		}
		
		// Recurse into each branch independently; an undersized left branch
		// must not prevent exploration of the right branch.
		if (subSet1.numInstances() >= minNumObj) {
			buildCLTree(subSet1, parent.getChildNodes()[0], currentDepth + 1);
		}

		if (subSet2.numInstances() >= minNumObj) {
			buildCLTree(subSet2, parent.getChildNodes()[1], currentDepth + 1);
		}
	}
	
	/**
	 * Builds the cluster tree for the given data.
	 * Creates the root node only when no Node exists yet (Node.numNodes()==0);
	 * the root starts with 0 N-points and one Y-point per instance.
	 *
	 * @param newData training instances
	 * @throws Exception if tree construction fails
	 */
	@Override
	public void buildClusterer(Instances newData) throws Exception {
		if (Node.numNodes() == 0) {
			root = new Node();
			root.setNPoints(0);
			root.setYPoints(newData.numInstances());
		}
		buildCLTree(newData, null);
	}


	/**
	 * Not implemented yet.
	 * TODO: should report the number of clusters, presumably the number of
	 * leaf regions of the grown tree — confirm against Node's API.
	 *
	 * @return 0 (placeholder)
	 */
	@Override
	public int numberOfClusters() throws Exception {
		// TODO Auto-generated method stub
		return 0;
	}
	
	/**
	 * Simple test driver: loads "trivial.arff" from the working directory,
	 * overrides a few parameters for testing and grows the tree.
	 */
	public static void main(String[] args) throws Exception {
//	    runClusterer(new Cltree(args), args); //TODO: compare with kmeans clusterer
		Instances i;
		// try-with-resources closes the reader even if parsing fails
		try (Reader r = new BufferedReader(new FileReader(new File("trivial.arff")))) {
			i = new Instances(r);
		}
		Cltree ct = new Cltree(args);
		
		//parameters for testing
		ct.minNumObj = 1;
		ct.maxDepth = 10;
		ct.minAcceptedImprovement = 0.002;
		ct.splitcriterium = "lookahead-cluster";
		
		ct.buildClusterer(i);
//		if (ct.prune) {
//			CLTreePruner pruner = new CLTreePruner();
//			pruner.pruneTree(ct);
//		}
	}
}
