package com.feng.spark.ml;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.regex.Pattern;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;

import scala.Tuple2;

/**
 * Naive K-Means clustering on Spark RDDs (Lloyd's algorithm).
 *
 * <p>Reads comma-separated numeric vectors from {@code input_path}, samples
 * {@code k} initial centers with a fixed seed, then alternates
 * assign-to-nearest-center and recompute-centroid steps until no center moves
 * more than {@link #CONVERGENCE_THRESHOLD} or {@link #MaxNumberOfIter}
 * iterations have run.
 *
 * <p>Usage: {@code KMeans k input_path [output_path]}
 *
 * @author feng
 */
public class KMeans2 {
	// Number of clusters; overridden by args[0].
	private static Integer K = 5;
	// Path of the input vectors; set from args[1].
	private static String InputPath = "";
	// Optional output path from args[2]; currently parsed but never written to.
	private static String OutputPath = "";
	// Hard cap on Lloyd iterations.
	private static Integer MaxNumberOfIter = 10;
	// A center counts as "moved" when it shifts by more than this distance;
	// iteration stops once every center is within the threshold.
	private static final double CONVERGENCE_THRESHOLD = 0.01;

	private static final Pattern COMMA = Pattern.compile(",");

	/**
	 * Parses and validates the command-line arguments into the static fields.
	 * Exits with status 1 on a malformed argument list.
	 *
	 * @param args {@code k input_path [output_path]}
	 */
	private static void setArgs(String[] args) {
		String errMsg = "Usage: KMeans k input_path [output_path]";
		// BUG FIX: the original accepted args.length == 1 and then crashed
		// reading args[1]; both k and input_path are mandatory.
		if (args.length < 2 || args.length > 3) {
			System.err.println(errMsg);
			System.exit(1);
		}
		K = Integer.valueOf(args[0]);
		InputPath = args[1];
		if (args.length > 2) {
			OutputPath = args[2];
		}
	}

	public static void main(String[] args) throws Exception {
		setArgs(args);

		SparkConf conf = new SparkConf().setAppName("KMeans");
		JavaSparkContext ctx = new JavaSparkContext(conf);
		JavaRDD<String> dataSource = ctx.textFile(InputPath, 1);

		// Parse each CSV line into a vector; cached because the RDD is
		// re-scanned once per iteration.
		JavaRDD<NpArray> points = dataSource
				.map(line -> new NpArray(COMMA.split(line)))
				.cache();

		// Initial centers: a fixed-seed random sample so runs are reproducible.
		List<NpArray> centers = points.takeSample(false, K, 42);
		System.out.println("\ninit cluster!\n");
		for (NpArray c : centers) {
			System.out.println("cluster:" + c);
		}

		for (int i = 0; i < MaxNumberOfIter; i++) {
			System.out.println("\nk-means iterate!\n");
			for (NpArray c : centers) {
				System.out.println("cluster:" + c);
			}

			// BUG FIX: the original called clusters.collect() *inside* the
			// mapToPair closure. RDD operations must not be nested inside
			// another RDD's transformation — the RDD handle is not usable on
			// executors and this fails (NPE/SparkException) on a real cluster.
			// Instead, snapshot the centers into an effectively final local
			// list that Spark serializes into the closure.
			final List<NpArray> currentCenters = centers;

			// Assign every point to its nearest center, tagging it with a
			// count of 1 so the reduce step can compute per-cluster means.
			JavaPairRDD<NpArray, Tuple2<NpArray, Integer>> assignments = points.mapToPair(point -> {
				double minDist = Double.MAX_VALUE;
				NpArray nearest = null;
				for (NpArray center : currentCenters) {
					double dist = center.distance(point);
					if (dist < minDist) {
						nearest = center;
						minDist = dist;
					}
				}
				return new Tuple2<>(nearest, new Tuple2<>(point, 1));
			});

			// Per cluster: sum the member vectors and their count, then divide
			// to obtain the new centroid. Result pairs are (old, new) center.
			// NOTE(review): a center that attracts no points drops out here
			// (reduceByKey emits nothing for it) — same behavior as original.
			JavaPairRDD<NpArray, NpArray> oldToNew = assignments
					.reduceByKey((v1, v2) -> new Tuple2<>(v1._1().add(v2._1()), v1._2() + v2._2()))
					.mapToPair(t -> new Tuple2<>(t._1(), t._2()._1().divide(t._2()._2())));

			// Decide convergence on the driver from the collected pairs, and
			// build the next center list from the same collected result
			// (the original lazily re-mapped the RDD instead, recomputing it).
			List<Tuple2<NpArray, NpArray>> movement = oldToNew.collect();
			boolean changed = false;
			List<NpArray> newCenters = new ArrayList<>(movement.size());
			for (Tuple2<NpArray, NpArray> pair : movement) {
				newCenters.add(pair._2());
				if (pair._1().distance(pair._2()) > CONVERGENCE_THRESHOLD) {
					changed = true;
				}
			}
			centers = newCenters;

			if (!changed) {
				break;
			}
		}

		System.out.println("\ncluster end!\n");
		for (NpArray c : centers) {
			System.out.println("result:" + c);
		}
		ctx.stop();
	}
}
