package cn.edu.xmu.datamining.tangzk.mralgos.kmeans;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Mapper for one k-means iteration.
 *
 * Input: {@code <offset, line>}; output: {@code <closest cluster, sample>}.
 *
 * Emits each data sample keyed by the closest cluster centroid. Lines read
 * from the previous iteration's center files are passed through keyed by
 * their own centroid, so clusters that attract no samples are not lost.
 *
 * @version 2013-8-4
 * @author tangzk
 * @Reviewer
 * 
 */
public class KMeansMapper extends
		Mapper<LongWritable, Text, ClusterCentroidWritable, SampleWritable> {

	private static final Logger LOG = LoggerFactory
			.getLogger(KMeansMapper.class);
	private List<ClusterCentroidWritable> clusterCenters = null;
	private boolean isSample = true;

	public static int loadClustersFromDistributedCache(
			List<ClusterCentroidWritable> resultClusters, Configuration conf)
			throws IOException {
		if (resultClusters == null) {
			return -1;
		}
		int badRecords = 0;
		// load clusters from distributed cache
		List<Path> clusteFilePath = new ArrayList<Path>();
		Path[] cacheFiles = DistributedCache.getLocalCacheFiles(conf);
		if (cacheFiles == null) {
			throw new IOException(
					"can't find clusters files in DistributedCache.");
		} else {
			for (Path path : cacheFiles) {
				if (path.getName().startsWith("part")) {
					// cluster file from reduce output
					clusteFilePath.add(path);
				}
			}
		}

		BufferedReader bufReader = null;
		int c = -1;
		SampleWritable sample = null;
		String line = null;
		String[] flds = null;
		for (Path path : clusteFilePath) {
			LOG.info("load clusters from file: {}", path.toString());
			bufReader = new BufferedReader(new FileReader(path.toString()));
			while ((line = bufReader.readLine()) != null) {
				flds = line.split("\t");
				if (flds.length < 2) {
					badRecords++;
					continue;
				}
				c = Math.abs(Integer.parseInt(flds[0]));
				sample = SampleWritable.parseSample(flds[1]);
				// TODO: check duplicate cluster labels, although the sample
				// class labels will diminish after the first iteration
				resultClusters.add(new ClusterCentroidWritable(c, sample));
			}
			bufReader.close();
		}
		return badRecords;
	}

	protected void setup(
			org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, ClusterCentroidWritable, SampleWritable>.Context context)
			throws java.io.IOException, InterruptedException {
		Configuration conf = context.getConfiguration();
		FileSplit split = (FileSplit) context.getInputSplit();
		String baseOutputPath = conf.get(KMeansDriver.BASE_OUTPUTDIR_NAME);
		// String filename = "xxx";
		// input is dataset or centers
		if (!split.getPath().toString().contains(baseOutputPath)) {
			clusterCenters = new ArrayList<ClusterCentroidWritable>();
			int badRecords = loadClustersFromDistributedCache(clusterCenters,
					conf);
			context.getCounter(KMeansDriver.USER_DEFINED_GROUP,
					KMeansDriver.CNT_BAD_CLUSTER_RECOD).increment(badRecords);
		} else {
			// may be that cluster never be assigned to any samples
			isSample = false;
		}
		LOG.info("FileSplit: {}, isSample: {}", split.getPath(), isSample);
	}

	protected void map(
			LongWritable key,
			Text value,
			org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, ClusterCentroidWritable, SampleWritable>.Context context)
			throws java.io.IOException, InterruptedException {
		String val = value.toString();
		if (isSample) {
			SampleWritable sample = SampleWritable.parseSample(val);
			ClusterCentroidWritable closestCentroid = sample
					.getClosestCluster(clusterCenters);
			if (closestCentroid == null) {
				LOG.error("can't find cluster({}): {}", closestCentroid,
						value.toString());
				context.getCounter(KMeansDriver.USER_DEFINED_GROUP,
						KMeansDriver.CNT_BAD_RECORDS).increment(1);
				return;
			}
			context.write(closestCentroid, sample);
		} else {
			String[] flds = val.split("\t");
			if (flds.length < 2) {
				return;
			}
			int c = Math.abs(Integer.parseInt(flds[0]));
			SampleWritable sample = SampleWritable.parseSample(flds[1]);
			ClusterCentroidWritable centroid = new ClusterCentroidWritable(c,
					sample);
			centroid.setCenter(true);
			context.write(centroid, sample);
		}
	}

	protected void cleanup(
			org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, ClusterCentroidWritable, SampleWritable>.Context context)
			throws java.io.IOException, InterruptedException {
		clusterCenters = null;
	}

}
