package cn.edu.xmu.datamining.tangzk.mralgos.kmeans;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// NOTE(review): unused JDK-internal import (likely an accidental IDE
// auto-import). sun.* classes are not a supported API and may not exist on
// other JDKs — remove once confirmed nothing references it.
import sun.util.LocaleServiceProviderPool.LocalizedObjectGetter;

import cn.edu.xmu.datamining.tangzk.mralgos.AlgorithmDriver;

/**
 * Driver for an iterative KMeans clustering computation on Hadoop MapReduce.
 *
 * <p>Each iteration i reads the data set plus the centroids produced under
 * {@code <outDir>-iter<i>} and writes updated centroids to
 * {@code <outDir>-iter<i+1>}; iteration stops when the
 * {@link #CNT_CENTROID_DIFF} counter reports no centroid movement or
 * {@code maxIteration} is reached. A final map-only job writes the cluster
 * assignment of every sample to {@code <outDir>-assigned}.
 */
public class KMeansDriver extends Configured implements Tool {

	private static final Logger LOG = LoggerFactory
			.getLogger(KMeansDriver.class);

	/** Counter group for this job's user-defined counters. */
	public static final String USER_DEFINED_GROUP = "kmeans.user_defined";
	/** Counter name — presumably unparseable input records; incremented in the tasks. */
	public static final String CNT_BAD_RECORDS = "kmeans.bad_records";
	/** Counter name — presumably unparseable centroid records; incremented in the tasks. */
	public static final String CNT_BAD_CLUSTER_RECOD = "kmeans.bad_cluster_records";
	/** Counter name: number of centroids that moved this iteration (convergence test). */
	public static final String CNT_CENTROID_DIFF = "kmeans.centroid_diff";
	/** Config key: movement threshold below which a centroid counts as unchanged. */
	public static final String CENTROID_DIFF_THRESHHOLD = "kmeans.centroid_diff_threshhold";
	/** Config key: number of clusters (k). */
	public static final String K_NAME = "kmeans.k";
	/** Default cluster count when {@link #K_NAME} is unset. */
	public static final int K_DEF_VALUE = 2;

	/** Config key: base path under which the per-iteration output dirs are created. */
	public static final String BASE_OUTPUTDIR_NAME = "kmeans.base_output_dir";

	/**
	 * Runs the iterative KMeans computation followed by the final
	 * assignment job.
	 *
	 * @param args {@code <dataDir> <outDir> <k> [maxIteration=10] [initCentersFile]}
	 * @return exit code of the first failing job, or 0 on success
	 * @throws Exception on bad usage, bad arguments, or HDFS/job failures
	 */
	@Override
	public int run(String[] args) throws Exception {
		int exitCode = 0;
		if (args.length < 3) {
			throw new Exception(
					"Usage: <prog> <dataDir> <outDir> <k> [maxIteration=10] [initCentersFile]");
		}
		Path dataDir = new Path(args[0]);
		String baseOutputPath = args[1];
		int k = Integer.parseInt(args[2]);
		if (k <= 0) {
			// k reducers are requested below; 0 or negative would yield a
			// degenerate job that can never produce k centroids.
			throw new IllegalArgumentException("k must be positive, got: " + k);
		}
		int maxIteration = 10;
		if (args.length >= 4) {
			maxIteration = Integer.parseInt(args[3]);
		}
		String initCentersFile = null;
		if (args.length >= 5) {
			initCentersFile = args[4];
		}

		Configuration conf = this.getConf();
		FileSystem fs = FileSystem.get(conf);
		writeInitialCentroids(fs, conf, baseOutputPath, k, initCentersFile);

		conf.setInt(KMeansDriver.K_NAME, k);
		conf.set(KMeansDriver.BASE_OUTPUTDIR_NAME, baseOutputPath);

		int iteration = 0;
		long centroidChanged = 1; // non-zero so the loop runs at least once

		Job job = null;
		while (iteration < maxIteration && centroidChanged > 0 && exitCode == 0) {
			job = new Job(conf, KMeansDriver.class.getSimpleName() + "-iter"
					+ iteration);

			// BUGFIX: register this iteration's centroid files on the
			// per-job configuration, not on the shared driver conf.
			// Adding them to the shared conf made "mapred.cache.files"
			// accumulate across iterations, so iteration N shipped (and
			// the tasks loaded) the centroids of every earlier iteration.
			FileStatus[] centerPaths = fs.listStatus(new Path(baseOutputPath
					+ "-iter" + iteration));
			for (FileStatus file : centerPaths) {
				if (!file.isDir()) {
					DistributedCache.addCacheFile(file.getPath().toUri(),
							job.getConfiguration());
				}
			}

			job.setMapperClass(KMeansMapper.class);
			job.setReducerClass(KMeansReducer.class);
			job.setPartitionerClass(KMeansPartitioner.class);
			job.setGroupingComparatorClass(KMeansGrouper.class);

			job.setMapOutputKeyClass(ClusterCentroidWritable.class);
			job.setMapOutputValueClass(SampleWritable.class);
			job.setOutputKeyClass(IntWritable.class);
			job.setOutputValueClass(Text.class);

			// One reducer per cluster so each writes its own centroid.
			job.setNumReduceTasks(k);

			// The job reads both the raw data and the previous iteration's
			// centroid directory as input.
			FileInputFormat.addInputPath(job, dataDir);
			FileInputFormat.addInputPath(job, new Path(baseOutputPath + "-iter"
					+ iteration));
			FileOutputFormat.setOutputPath(job, new Path(baseOutputPath
					+ "-iter" + (iteration + 1)));

			exitCode = AlgorithmDriver.runJob(job);
			// Converged when no centroid moved more than the threshold.
			centroidChanged = job
					.getCounters()
					.findCounter(KMeansDriver.USER_DEFINED_GROUP,
							KMeansDriver.CNT_CENTROID_DIFF).getValue();
			iteration++;
		}
		if (exitCode == 0) {
			// Final map-only pass: label every sample with its cluster id.
			job = new Job(conf, KMeansDriver.class.getSimpleName() + "-result");
			job.setMapperClass(KMeansAssignMapper.class);
			job.setMapOutputKeyClass(IntWritable.class);
			job.setMapOutputValueClass(Text.class);
			job.setNumReduceTasks(0);

			FileInputFormat.addInputPath(job, dataDir);
			FileOutputFormat.setOutputPath(job, new Path(baseOutputPath
					+ "-assigned"));
			exitCode = AlgorithmDriver.runJob(job);
		}
		return exitCode;
	}

	/**
	 * Writes the k initial centroids to
	 * {@code <baseOutputPath>-iter0/part-init-centroids.txt} on HDFS,
	 * either generated at random or copied verbatim from a local file.
	 *
	 * @param fs              destination file system
	 * @param conf            Hadoop configuration (buffer size for the copy)
	 * @param baseOutputPath  base output path of the computation
	 * @param k               number of centroids to generate
	 * @param initCentersFile local file with initial centroids, or
	 *                        {@code null} to generate random ones
	 * @throws IOException on any HDFS or local-file failure
	 */
	private void writeInitialCentroids(FileSystem fs, Configuration conf,
			String baseOutputPath, int k, String initCentersFile)
			throws IOException {
		fs.mkdirs(new Path(baseOutputPath + "-iter0"));
		FSDataOutputStream hdfsDataOut = fs.create(new Path(baseOutputPath
				+ "-iter0/part-init-centroids.txt"));
		if (initCentersFile == null) {
			// random initialize the cluster centers.
			LOG.info("random initializing clutser centroids...");
			BufferedWriter bufWriter = new BufferedWriter(
					new OutputStreamWriter(hdfsDataOut));
			try {
				for (int i = 0; i < k; i++) {
					ClusterCentroidWritable centroid = new ClusterCentroidWritable(
							i, SampleWritable.newRandom());
					// Same "<cluster-id>\t<sample>" line format the mapper
					// later parses back.
					bufWriter.write(centroid.getC() + "\t"
							+ centroid.getSample());
					bufWriter.write('\n');
				}
			} finally {
				bufWriter.close(); // also closes the wrapped hdfsDataOut
			}
		} else {
			// initialize the cluster centers from a user-supplied file
			LOG.info("loading inital cluster centroids from file: {}...",
					initCentersFile);
			FileInputStream fin = new FileInputStream(new File(initCentersFile));
			try {
				// close=false: we close each stream exactly once ourselves
				// (the old code let copyBytes close both and then closed
				// the HDFS stream a second time).
				IOUtils.copyBytes(fin, hdfsDataOut, conf.getInt(
						"io.file.buffer.size", 4096), false);
			} finally {
				fin.close();
			}
			hdfsDataOut.close();
		}
	}

	public static void main(String[] args) throws Exception {
		AlgorithmDriver.toolRun(new KMeansDriver(), args);
	}

}
