package edu.indiana.d2i.mapreduce;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;
import java.util.SortedMap;

import me.prettyprint.cassandra.service.CassandraHostConfigurator;
import me.prettyprint.cassandra.service.ThriftCluster;
import me.prettyprint.hector.api.beans.HColumn;
import me.prettyprint.hector.api.ddl.ColumnFamilyDefinition;
import me.prettyprint.hector.api.ddl.ComparatorType;
import me.prettyprint.hector.api.ddl.KeyspaceDefinition;

import org.apache.cassandra.dht.RandomPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.thrift.TokenRange;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/**
 * Hadoop {@link InputFormat} that reads rows from Cassandra through Hector.
 * Splits are built by chunking a pre-configured list of row keys (from
 * {@code HectorConfigHelper.getKeySliceList}) into groups of at most
 * {@code HectorConfigHelper.getInputSplitSize} keys each; one
 * {@code HectorColumnFamilySplit} is emitted per chunk.
 */
public class HectorColumnFamilyInputFormat extends
		InputFormat<Object, List<HColumn<?, byte[]>>> {
	/**
	 * Partitions the configured key list into fixed-size splits.
	 *
	 * <p>Fix: the original always appended a trailing split after the main
	 * loop, which produced an empty split (and a wasted map task) whenever
	 * the key count was an exact multiple of the split size, or when the
	 * key list was empty.
	 *
	 * @param context job context carrying the Hector configuration
	 * @return one split per chunk of at most {@code splitSize} keys; empty
	 *         list when no keys are configured
	 * @throws IOException if the configured split size is not positive
	 */
	@Override
	public List<InputSplit> getSplits(JobContext context) throws IOException,
			InterruptedException {
		Configuration conf = context.getConfiguration();
		List<InputSplit> splits = new ArrayList<InputSplit>();

		// NOTE(review): data locality is ignored — every split is pinned to
		// this single hard-coded host. A token-range based strategy (querying
		// the ring via ThriftCluster.describeRing and assigning keys to the
		// endpoints owning their token) previously existed here in commented
		// form; consider restoring it for multi-node clusters.
		List<String> hosts = new ArrayList<String>();
		hosts.add("coffeetree.cs.indiana.edu");

		ByteBuffer[] keySliceList = HectorConfigHelper.getKeySliceList(conf);
		int splitSize = HectorConfigHelper.getInputSplitSize(conf);
		if (splitSize <= 0) {
			// Fail with a clear message instead of an ArithmeticException.
			throw new IOException("Input split size must be positive, got "
					+ splitSize);
		}

		// Walk the key array in strides of splitSize; the final chunk is
		// naturally shorter when the length is not an exact multiple, and
		// no chunk is emitted at all for an empty key list.
		for (int offset = 0; offset < keySliceList.length; offset += splitSize) {
			int end = Math.min(offset + splitSize, keySliceList.length);
			List<ByteBuffer> chunk = Arrays.asList(Arrays.copyOfRange(
					keySliceList, offset, end));
			splits.add(new HectorColumnFamilySplit(chunk, hosts));
		}
		return splits;
	}

	/**
	 * Creates the reader that iterates the columns for each key in a split.
	 *
	 * @param inputSplit         the split to read (unused here; the reader
	 *                           receives it via {@code initialize})
	 * @param taskAttemptContext task context (unused here; passed to the
	 *                           reader via {@code initialize})
	 * @return a fresh {@code HectorColumnFamilyRecordReader}
	 */
	@Override
	public RecordReader<Object, List<HColumn<?, byte[]>>> createRecordReader(
			InputSplit inputSplit, TaskAttemptContext taskAttemptContext)
			throws IOException, InterruptedException {
		return new HectorColumnFamilyRecordReader();
	}
}
