package com.feng.spark.ml;

import java.util.List;
import java.util.Random;
import java.util.regex.Pattern;


import org.apache.spark.SparkConf;

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;


import scala.Tuple2;

/**
 * K-Means clustering on Spark (Lloyd's algorithm).
 *
 * <p>Reads comma-separated numeric vectors from the input path, samples
 * {@code K} initial centroids, then alternates assignment and centroid
 * recomputation until the assignment stabilises (every centroid moves less
 * than 0.01) or {@code MaxNumberOfIter} passes complete.
 *
 * @author feng
 */
public class KMeans {
	// Configuration, populated from the command line in setArgs().
	private static Integer K = 5;                // number of clusters
	private static String InputPath = "";        // input file of CSV vectors
	private static String OutputPath = "";       // optional; currently only logged
	private static Integer MaxNumberOfIter = 10; // hard cap on iterations
	private static JavaRDD<NpArray> clusters;    // current centroids

	private static final Pattern COMMA = Pattern.compile(",");

	/**
	 * Parses and validates the command-line arguments.
	 *
	 * <p>Exits the JVM with status 1 when fewer than two or more than three
	 * arguments are supplied.
	 *
	 * @param args {@code [k, input_path, [output_path]]}
	 */
	private static void setArgs(String[] args) {
		String errMsg = "Usage: KMeans k input_path [output_path]";
		// BUG FIX: the original test was (args.length < 1), which accepted a
		// single argument and then crashed reading args[1]. At least k and
		// input_path are required.
		if (args.length < 2 || args.length > 3) {
			System.err.println(errMsg);
			System.exit(1);
		}
		K = Integer.valueOf(args[0]);
		InputPath = args[1];
		if (args.length > 2) {
			OutputPath = args[2];
		}

		System.out.println("args: K:" + K + ",input_path:" + InputPath + ",output_path:" + OutputPath);
	}

	/**
	 * Driver entry point: runs the full K-Means loop and prints the final
	 * centroids to stdout.
	 *
	 * @param args {@code [k, input_path, [output_path]]}
	 */
	public static void main(String[] args) {
		setArgs(args);

		SparkConf conf = new SparkConf().setAppName("KMeans");
		JavaSparkContext ctx = new JavaSparkContext(conf);
		try {
			JavaRDD<String> dataSource = ctx.textFile(InputPath, 1);

			// Parse each CSV line into a numeric vector.
			JavaRDD<NpArray> points = dataSource.map(line -> new NpArray(COMMA.split(line)));

			// Cache: the point set is scanned once per iteration.
			points = points.cache();

			// Initialise centroids with a fixed-seed sample for reproducibility.
			List<NpArray> clusterList = points.takeSample(false, K, 42);
			for (NpArray sample : clusterList) {
				System.out.println("Sample:" + sample);
			}

			clusters = ctx.parallelize(clusterList);
			System.out.println("\ninit cluster!\n");
			for (NpArray c : clusters.collect()) {
				System.out.println("cluster:" + c);
			}

			// point -> assigned centroid, from the previous iteration (null on the first).
			JavaPairRDD<NpArray, NpArray> clusterMapping = null;

			for (int i = 0; i < MaxNumberOfIter; i++) {
				// Materialise the centroids so the closure below captures a plain
				// serialisable list rather than an RDD.
				List<NpArray> currentClusters = clusters.collect();

				// Assignment step: map each point to its nearest centroid.
				JavaPairRDD<NpArray, NpArray> newClusterMapping = points.mapToPair(point -> {
					double minDist = Double.MAX_VALUE;
					NpArray minCluster = null;
					for (NpArray cluster : currentClusters) {
						double dist = cluster.distance(point);
						if (dist < minDist) {
							minCluster = cluster;
							minDist = dist;
						}
					}
					return new Tuple2<>(point, minCluster);
				});

				// Convergence test (restored from the original's commented-out
				// draft): changed unless every point's centroid moved < 0.01.
				boolean isChange;
				if (clusterMapping == null) {
					isChange = true; // first iteration — nothing to compare against
				} else {
					isChange = !newClusterMapping.join(clusterMapping)
							.map(p -> p._2()._1().distance(p._2()._2()) < 0.01)
							.reduce((a, b) -> a && b);
				}

				// BUG FIX: the original left this assignment commented out and then
				// built the reverse mapping from the stale clusterMapping, which was
				// still null on the first iteration -> NullPointerException.
				clusterMapping = newClusterMapping;

				if (!isChange) {
					break; // assignments stable — converged
				}

				// (centroid, point) pairs for per-centroid aggregation.
				// NOTE(review): NpArray is used as an RDD key below, which relies on
				// it implementing consistent equals()/hashCode() — confirm in NpArray.
				JavaPairRDD<NpArray, NpArray> reverseClusterMapping =
						clusterMapping.mapToPair(t -> new Tuple2<>(t._2(), t._1()));

				// Update step: per-centroid point count and coordinate sum.
				JavaPairRDD<NpArray, Integer> count = reverseClusterMapping
						.mapToPair(t -> new Tuple2<>(t._1(), 1))
						.reduceByKey((a, b) -> a + b);
				JavaPairRDD<NpArray, NpArray> sum =
						reverseClusterMapping.reduceByKey((a, b) -> a.add(b));

				// Debug output of the aggregates.
				System.out.println("\nk-means sum!\n");
				for (Tuple2<NpArray, NpArray> t : sum.collect()) {
					System.out.println("sum:" + t._1() + "," + t._2());
				}
				System.out.println("\nk-means count!\n");
				for (Tuple2<NpArray, Integer> t : count.collect()) {
					System.out.println("count:" + t._1() + t._2());
				}

				// New centroid = mean of the points assigned to it.
				clusters = sum.join(count)
						.map(t -> t._2()._1().divide(t._2()._2()));

				System.out.println("\nk-means iterate!\n");
				for (NpArray c : clusters.collect()) {
					System.out.println("cluster:" + c);
				}
			}

			System.out.println("\n\ncluster end!");
			for (NpArray c : clusters.collect()) {
				System.out.println("cluster:" + c);
			}
		} finally {
			// RESOURCE FIX: the original never released the SparkContext.
			ctx.close();
		}
	}
}
