package cn.gogetter.clusterer;

import java.io.File;

import weka.clusterers.ClusterEvaluation;
import weka.clusterers.Clusterer;
import weka.clusterers.SimpleKMeans;
import weka.core.Instances;
import weka.core.Utils;
import weka.core.converters.ArffLoader;
// Batch-trains the KMeans clustering algorithm
public class KMeansClusterer {
	/**
	 * Trains a SimpleKMeans clusterer on weather.numeric.arff, then evaluates
	 * it against weather.numeric2.arff, printing the built model, the
	 * evaluation summary, and the per-instance cluster assignment plus
	 * membership distribution.
	 */
	public static void main(String[] args){
		// Resolve the data directory relative to the working directory.
		// File(parent, child) composes the path portably — the original
		// hard-coded "\\" separators, which only work on Windows.
		File dataDir = new File(new File("").getAbsolutePath(), "data");
		ArffLoader loader = new ArffLoader();
		try {
			// Load the training set and the evaluation set.
			loader.setFile(new File(dataDir, "weather.numeric.arff"));
			Instances data = loader.getDataSet();
			loader.setFile(new File(dataDir, "weather.numeric2.arff"));
			Instances data2 = loader.getDataSet();

			// Build the clusterer with 2 clusters (-N 2).
			SimpleKMeans clusterer = new SimpleKMeans();
			clusterer.setOptions(new String[] { "-N", "2" });
			clusterer.buildClusterer(data);

			System.out.println("// 输出生成模型");
			System.out.println(clusterer);

			// Evaluate the trained clusterer on the second data set.
			ClusterEvaluation eval = new ClusterEvaluation();
			eval.setClusterer(clusterer);
			eval.evaluateClusterer(data2);
			System.out.println("// 输出评估结果");
			System.out.println(eval.clusterResultsToString());

			// For each instance: index, instance values, assigned cluster,
			// and the cluster membership distribution.
			System.out.println("// 输出分布结果");
			for (int i = 0; i < data2.numInstances(); i++) {
				int cluster = clusterer.clusterInstance(data2.instance(i));
				double[] dist = clusterer.distributionForInstance(data2.instance(i));
				// println gives the platform line separator, consistent with
				// the other output (original forced "\r\n" here only).
				System.out.println(i + " - " + data2.get(i) + " - "
						+ cluster + " - " + Utils.arrayToString(dist));
			}
		} catch (Exception e) {
			// Weka's API declares a broad checked Exception; report it here
			// since there is no caller to recover.
			e.printStackTrace();
		}
	}
}
