package clustering;

import java.io.BufferedWriter;
import java.io.FileWriter;

import weka.clusterers.ClusterEvaluation;
import weka.clusterers.SimpleKMeans;
import weka.core.Instances;
import weka.core.converters.ArffLoader;
import weka.core.converters.ConverterUtils.DataSource;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.AddCluster;

public class Clusterer {

	/**
	 * Loads the ARFF data set {@code temp.arff}, clusters it with
	 * SimpleKMeans (10 clusters, Euclidean distance over all attributes,
	 * at most 100 iterations, random seed 10), prints the cluster
	 * evaluation to stdout, and writes the data set augmented with a
	 * "cluster" attribute to {@code test_out.arff}.
	 *
	 * <p>Errors are reported to stderr but not propagated, matching the
	 * original best-effort contract of this method.
	 */
	public void cluster() {
		try {
			DataSource source = new DataSource("temp.arff");
			Instances data = source.getDataSet();

			// SimpleKMeans options: -N number of clusters, -A distance
			// function, -I max iterations, -S random seed.
			String[] options = {
					"-N", "10",
					"-A", "weka.core.EuclideanDistance -R first-last",
					"-I", "100",
					"-S", "10"
			};

			SimpleKMeans clusterer = new SimpleKMeans();
			clusterer.setOptions(options);
			clusterer.buildClusterer(data);

			// Evaluate on a copy so evaluation cannot mutate the training data.
			ClusterEvaluation eval = new ClusterEvaluation();
			eval.setClusterer(clusterer);
			eval.evaluateClusterer(new Instances(data));
			System.out.println(eval.clusterResultsToString());

			// FIX: hand the configured k-means model to the filter. The
			// original never called setClusterer(), so AddCluster silently
			// used its default clusterer instead of the 10-cluster model
			// built and evaluated above.
			AddCluster filter = new AddCluster();
			filter.setClusterer(clusterer);
			filter.setInputFormat(data);
			Instances newData = Filter.useFilter(data, filter);

			// try-with-resources guarantees the writer is closed even if
			// write() throws (the original leaked it on failure).
			try (BufferedWriter writer = new BufferedWriter(new FileWriter(
					"test_out.arff"))) {
				writer.write(newData.toString());
			}

		} catch (Exception ex) {
			// NOTE(review): best-effort error handling kept for backward
			// compatibility; consider rethrowing or logging via SLF4J.
			ex.printStackTrace();
		}
	}

}
