package edu.indiana.d2i.examples;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.SortedMap;
import java.util.StringTokenizer;

import me.prettyprint.hector.api.beans.HColumn;

import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.dht.RandomPartitioner;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.FBUtilities;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BloomMapFile;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Reducer.Context;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import edu.indiana.d2i.mapreduce.HectorColumnFamilyInputFormat;
import edu.indiana.d2i.mapreduce.HectorConfigHelper;
import edu.indiana.d2i.util.hector.HTRCClient;

@SuppressWarnings("rawtypes")
public class HTRCWordCount extends Configured implements Tool {
	private final String COLUMN_FAMILY = "VolumeContents";
	private final String KEYSPACE_NAME = "Yimdata";
	private final String CASSANDRA_EPR = "coffeetree.cs.indiana.edu";
	private final String CASSANDRA_CLUSTER_NAME = "Yiming Coffeetree Cluster";
	private final String CASSDANDRA_PORT = "9160";

	private final static IntWritable one = new IntWritable(1);
	private Text word = new Text();
	private final String OUTPUT_PATH = "./word_count";

	public static class TokenizerMapper extends
			Mapper<String, List<HColumn<?, byte[]>>, Text, IntWritable> {
		private final static IntWritable one = new IntWritable(1);
		private Text word = new Text();

		public void map(String key, List<HColumn<?, byte[]>> value,
				Context context) throws IOException, InterruptedException {
			// concatenate the pages
			StringBuilder text = new StringBuilder();
			for (HColumn<?, byte[]> hColumn : value) {
				byte[] columnVal = (byte[]) hColumn.getValue();
				text.append(new String(columnVal));
			}

			// map
			StringTokenizer itr = new StringTokenizer(text.toString());
			while (itr.hasMoreTokens()) {
				word.set(itr.nextToken());
				context.write(word, one);
			}
		}
	};

	public static class ReducerToFilesystem extends
			Reducer<Text, IntWritable, Text, IntWritable> {
		public void reduce(Text key, Iterable<IntWritable> values,
				Context context) throws IOException, InterruptedException {
			int sum = 0;
			for (IntWritable val : values)
				sum += val.get();
			context.write(key, new IntWritable(sum));
		}
	}

	public int run(String[] args) throws Exception {
		if (args.length < 2) {
			System.err.println("Usage: keylist_file output_dir");
			return -1;
		}
		String keylistFile = args[0];
		String outputFile = args[1];
		
		Job job = new Job(getConf(), "htrcwordcount");
		job.setJarByClass(HTRCWordCount.class);
		job.setMapperClass(TokenizerMapper.class);
		job.setCombinerClass(ReducerToFilesystem.class);
		job.setReducerClass(ReducerToFilesystem.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		job.setInputFormatClass(HectorColumnFamilyInputFormat.class);
//		FileOutputFormat.setOutputPath(job, new Path(OUTPUT_PATH));
		FileOutputFormat.setOutputPath(job, new Path(outputFile));

		// connection setting
		HectorConfigHelper.setRpcPort(job.getConfiguration(), CASSDANDRA_PORT);
		HectorConfigHelper.setInitialAddress(job.getConfiguration(),
				CASSANDRA_EPR);
		HectorConfigHelper.setClusterName(job.getConfiguration(),
				CASSANDRA_CLUSTER_NAME);
		HectorConfigHelper.setInputColumnFamily(job.getConfiguration(),
				KEYSPACE_NAME, COLUMN_FAMILY);
		// used to decide splits
		HectorConfigHelper.setPartitioner(job.getConfiguration(),
				"org.apache.cassandra.dht.RandomPartitioner");
		HectorConfigHelper.setInputSplitSize(job.getConfiguration(), 100);
		HectorConfigHelper.setRangeBatchSize(job.getConfiguration(), 10);
		// key slice list
//		String[] keylist = {"loc.ark:/13960/t55d8xj45", "loc.ark:/13960/t55d8xk8j", 
//				"loc.ark:/13960/t55d8xm23", "loc.ark:/13960/t55d8xn4h"};
		BufferedReader reader = new BufferedReader(new FileReader(keylistFile));
		String volume = null;
		List<String> keyArray = new ArrayList<String>();
		while ((volume = reader.readLine()) != null) {
			keyArray.add(volume);
		}
		String[] keylist = keyArray.toArray(new String[keyArray.size()]);
		
		HectorConfigHelper.setKeySliceList(job.getConfiguration(), keylist);
		// the way to read data out from Cassandra
		HectorConfigHelper.setHectorClientClass(job.getConfiguration(),
				HTRCClient.class);

		job.waitForCompletion(true);

		return 0;
	}

	public static void main(String[] args) throws Exception {
		ToolRunner.run(new Configuration(), new HTRCWordCount(), args);
		System.exit(0);
	}
}
