package kmeans;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import kmeans.bean.WXKmeansCentroids;
import kmeans.bean.WXKmeansData;
import kmeans.mapreduce.WXKmeansMapper;
import kmeans.mapreduce.WXKmeansReduce;
import kmeans.mapreduce.WXKmensCombiner;

import java.io.File;


public class WXKmeansRun {
    private static final Logger log = LogManager.getLogger(WXKmeansRun.class);

    // Default HDFS locations; overridden by command-line arguments when supplied.
    // The centroid root must end with '/' so file names can be appended directly.
    private static String centroidPath = "hdfs://wangxin:8020/kmeans/";
    private static String inputPath = "hdfs://wangxin:8020/input/kmeans/";
    // Number of coordinates per data point; parsed from the first argument.
    private static Integer dimension;

    /**
     * Driver for the iterative K-means MapReduce job.
     *
     * <p>Arguments: {@code <dimension> [<input data filepath> <centroidRootPath>]}.
     * The first argument is the dimensionality of the data points; the second is
     * the input data file; the third is the directory that holds the centroid
     * file for each iteration (iteration {@code i} writes to
     * {@code centroidRootPath/i/}). Iteration stops once two consecutive centroid
     * sets agree within tolerance 0.1 (see {@link #isContinue(String, String)}).
     *
     * @param args command-line arguments as described above; when empty, a
     *             hard-coded demo configuration is used
     * @throws Exception if HDFS access or job submission fails
     */
    public static void main(String[] args) throws Exception {
        int iteration = 0;
        // FIX: the original unconditionally overwrote args with these demo values,
        // which made the CLI (and the usage check below) dead code. Fall back to
        // the demo arguments only when the caller supplied none.
        if (args.length == 0) {
            args = "3 hdfs://wangxin:8020/input/kmeans/data.txt hdfs://wangxin:8020/kmeans".split(" ");
        }
        Configuration configuration = new Configuration();
        configuration.set("fs.defaultFS", "hdfs://wangxin:8020");

        FileSystem fileSystem = FileSystem.get(configuration);
        GenericOptionsParser optionsParser = new GenericOptionsParser(configuration, args);
        String[] remaining = optionsParser.getRemainingArgs();
        if (remaining.length != 1 && remaining.length != 3) {
            System.err.println("Usage: K-Means <dimension> <input data filepath> <centroidRootPath(without filename , default filename is centroid)>");
            System.exit(2);
        }
        // FIX: read from `remaining` (generic Hadoop options stripped), not `args`;
        // the two arrays can differ when -D/-conf style options are passed.
        dimension = Integer.valueOf(remaining[0]);
        if (remaining.length == 3) {
            inputPath = remaining[1];
            // FIX: HDFS paths always use '/'; File.separator is '\' on Windows
            // and would produce a malformed HDFS URI there.
            centroidPath = remaining[2].endsWith("/") ? remaining[2] : remaining[2] + "/";
        }
        log.info("centroid root path: {}", centroidPath);
        configuration.set("dimension", dimension.toString());

        String lastCentroid;
        // The initial centroid file is expected at <centroidPath>/centroid.
        String currentCentroid = centroidPath + "centroid";

        do {
            // Each iteration's mapper reads the centroids produced by the
            // previous iteration via this configuration key.
            configuration.set("centroid.path", currentCentroid);

            Job job = Job.getInstance(configuration, "K-means: iteration " + iteration);
            job.setJarByClass(WXKmeansRun.class);
            job.setMapperClass(WXKmeansMapper.class);
            job.setCombinerClass(WXKmensCombiner.class);
            job.setReducerClass(WXKmeansReduce.class);

            job.setMapOutputKeyClass(IntWritable.class);
            job.setMapOutputValueClass(WXKmeansData.class);
            job.setOutputKeyClass(IntWritable.class);
            job.setOutputValueClass(Text.class);
            FileInputFormat.addInputPath(job, new Path(inputPath));

            // MapReduce refuses to run if the output directory exists, so
            // remove any leftovers from a previous run of this iteration.
            Path outputDir = new Path(centroidPath + iteration + "/");
            if (fileSystem.exists(outputDir)) {
                fileSystem.delete(outputDir, true);
            }
            FileOutputFormat.setOutputPath(job, outputDir);

            if (!job.waitForCompletion(true)) {
                System.exit(2);
            }

            // The reducer's single output file holds the new centroids.
            lastCentroid = currentCentroid;
            currentCentroid = centroidPath + iteration + "/part-r-00000";
            iteration++;
        } while (isContinue(lastCentroid, currentCentroid));
    }

    /**
     * Decides whether another K-means iteration is needed by comparing the
     * centroid sets at the two given paths.
     *
     * @param oldPath HDFS path of the centroids used by the iteration just run
     * @param newPath HDFS path of the centroids it produced
     * @return {@code true} if the centroids differ beyond tolerance 0.1 (i.e.
     *         iteration should continue); {@code false} once they have converged
     */
    public static boolean isContinue(String oldPath, String newPath) {
        WXKmeansCentroids oldCentroids = new WXKmeansCentroids(dimension, oldPath);
        WXKmeansCentroids newCentroids = new WXKmeansCentroids(dimension, newPath);
        return !oldCentroids.isEquals(newCentroids, 0.1);
    }
}
