package com.example.hadoop.mapreduce.kmeans;

import com.example.hadoop.others.Helper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.net.URISyntaxException;

/**
 * Note: to stay consistent with the DBSCAN implementation, a pointIndex is
 * prepended to every data record. K-means does not use this information, so
 * every centroid record carries -1 as its pointIndex.
 */

public class KmeansApp {

    /** Number of clusters (k). */
    private static final int K = 5;

    /** Number of Lloyd iterations to run before the final assignment pass. */
    private static final int ITERATIONS = 20;

    /** HDFS location of the input points. */
    private static final String DATA_PATH = "/cluster/data.txt";

    /** HDFS working directory for K-means intermediate and final output. */
    private static final String KMEANS_BASE_PATH = "/cluster/kmeans";

    /**
     * Driver entry point. Workflow:
     * <ol>
     *   <li>Ensure the input data exists on HDFS (generate and upload if missing).</li>
     *   <li>Reset the K-means working directory and initialize {@value #K} centers.</li>
     *   <li>Run {@value #ITERATIONS} MapReduce iterations, refreshing the centers
     *       after each successful pass.</li>
     *   <li>Run one final job that emits each point's cluster index.</li>
     * </ol>
     *
     * @param args unused command-line arguments
     * @throws IOException            on HDFS access failure
     * @throws URISyntaxException     if the configured filesystem URI is malformed
     * @throws InterruptedException   if a job is interrupted while waiting
     * @throws ClassNotFoundException if a job class cannot be resolved at submit time
     */
    public static void main(String[] args) throws IOException, URISyntaxException, InterruptedException, ClassNotFoundException {
        Configuration configuration = Helper.getConfiguration();
        FileSystem fileSystem = Helper.getFileSystem();
        Path dataPath = new Path(DATA_PATH);

        ensureInputData(fileSystem, dataPath);
        resetWorkingDirectory(fileSystem);

        // Initialize the centroids at /cluster/kmeans/centers.txt and echo them
        // to the console so the starting state is visible.
        Helper.initCenters(K, fileSystem, dataPath);
        double[][] centers = Helper.getCenters(fileSystem);
        for (double[] center : centers) {
            System.out.println(String.format("[%.5f], [%.5f]", center[0], center[1]));
        }

        for (int iteration = 1; iteration <= ITERATIONS; iteration++) {
            runIteration(configuration, fileSystem, iteration);
        }

        if (runAssignmentJob(configuration, fileSystem)) {
            System.out.println("program finished");
        }
    }

    /**
     * Makes sure the input file exists on HDFS; if not, generates random data
     * locally and uploads it.
     */
    private static void ensureInputData(FileSystem fileSystem, Path dataPath) throws IOException {
        if (!fileSystem.exists(dataPath)) {
            System.out.println("file not exists");
            Helper.generateData(1000000, true);
            // NOTE(review): assumes Helper.generateData writes ./cluster.data.txt — confirm.
            Path localPath = new Path("./cluster.data.txt");
            fileSystem.copyFromLocalFile(localPath, dataPath);
        }
    }

    /** Deletes any previous K-means output and recreates an empty working directory. */
    private static void resetWorkingDirectory(FileSystem fileSystem) throws IOException {
        Path basePath = new Path(KMEANS_BASE_PATH);
        deleteIfExists(fileSystem, basePath);
        fileSystem.mkdirs(basePath);
    }

    /**
     * Runs one MapReduce iteration and, on success, refreshes the centroids
     * from its reducer output. A failed iteration is logged instead of being
     * silently ignored.
     */
    private static void runIteration(Configuration configuration, FileSystem fileSystem, int iteration)
            throws IOException, InterruptedException, ClassNotFoundException {
        String outputDir = String.format("%s/output/iter%d", KMEANS_BASE_PATH, iteration);
        Path outputPath = new Path(outputDir);

        Job job = Job.getInstance(configuration, String.format("Iter%d-%s", iteration, outputDir));
        job.setJarByClass(KmeansApp.class);
        job.setMapperClass(KmeansMapper.class);
        job.setReducerClass(KmeansReducer.class);
        // The reducer is used as a combiner to cut shuffle volume; this is only
        // safe because its output type matches the mapper's output type.
        job.setCombinerClass(KmeansReducer.class);
        job.setMapOutputKeyClass(IntWritable.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path(DATA_PATH));
        deleteIfExists(fileSystem, outputPath);
        FileOutputFormat.setOutputPath(job, outputPath);

        // Block until this iteration's MapReduce pass has finished.
        if (job.waitForCompletion(true)) {
            // Recompute the centroids from this iteration's reducer output.
            Helper.refreshCenters(K, fileSystem, new Path(outputDir + "/part-r-00000"));
        } else {
            System.err.println(String.format("iteration %d failed; centers not refreshed", iteration));
        }
    }

    /**
     * Runs the final map-only job that labels every point with its cluster
     * index, writing to {@code /cluster/kmeans/output/result}.
     *
     * @return {@code true} if the job completed successfully
     */
    private static boolean runAssignmentJob(Configuration configuration, FileSystem fileSystem)
            throws IOException, InterruptedException, ClassNotFoundException {
        Path resultPath = new Path(KMEANS_BASE_PATH + "/output/result");

        Job job = Job.getInstance(configuration, "getClusterIndex");
        // A single reducer keeps the result in one output file.
        job.setNumReduceTasks(1);
        job.setJarByClass(KmeansApp.class);
        // No reducer class is set: the identity reducer passes mapper output through.
        job.setMapperClass(KmeansMapper.class);
        job.setMapOutputKeyClass(IntWritable.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path(DATA_PATH));
        deleteIfExists(fileSystem, resultPath);
        FileOutputFormat.setOutputPath(job, resultPath);

        return job.waitForCompletion(true);
    }

    /** Recursively deletes {@code path} if it exists, so a job can re-use it as output. */
    private static void deleteIfExists(FileSystem fileSystem, Path path) throws IOException {
        if (fileSystem.exists(path)) {
            fileSystem.delete(path, true);
        }
    }
}
