package index;

import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.Reducer;

/**
 * @author Guimin Lin
 * @date Feb 17, 2011
 */
public class IndexReducer extends MapReduceBase implements
		Reducer<Text, Text, Text, Text> {

	/** Doc ids already merged for the current term; reset at the start of each reduce(). */
	private final HashSet<String> docIdSet = new HashSet<String>();
	/** Reusable output value — avoids allocating a Text per emitted record. */
	private final Text resultText = new Text();

	/**
	 * Merges the mapper-emitted postings for one term into delta-encoded posting-list
	 * records.
	 * <p>
	 * Each incoming value appears to have the layout
	 * {@code <df><DF_SYM><docId><TF_SYM>...<DF_SYM><offset>} (inferred from the parsing
	 * below — confirm against the mapper). Postings with a doc id already seen for this
	 * term are skipped. Doc ids are delta-encoded against the previous posting's trailing
	 * offset and accumulated in a buffer prefixed with the running document-frequency sum.
	 * The buffer is flushed whenever it — or a single oversized posting — exceeds
	 * {@code Index.REDUCE_MAX_SIZE}, so one term may produce several output records.
	 *
	 * @param key      the term
	 * @param values   mapper postings for this term
	 * @param output   collector receiving (term, posting-list) records
	 * @param reporter unused
	 * @throws IOException if the collector fails
	 */
	@Override
	public void reduce(Text key, Iterator<Text> values,
			OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
		docIdSet.clear(); // duplicate tracking is per-term

		int sum = 0;    // running document frequency of the buffered postings
		int offset = 0; // delta base: previous posting's trailing offset field
		boolean isFirstRecord = true;
		String keyString;
		// Single-threaded reducer: StringBuilder, not the synchronized StringBuffer.
		StringBuilder resultBuffer = new StringBuilder(512);

		Index.printToErr("Processing Key: " + key.toString());

		while (values.hasNext()) {

			String postingString = values.next().toString();

			// Drop postings whose doc id was already merged for this term.
			if (!isUnique(postingString)) {
				continue;
			}

			Index.printToErr("Processing posting: " + postingString);

			int dfSym = postingString.indexOf(Index.DF_SYM);
			int df = Integer.parseInt(postingString.substring(0, dfSym));
			int docId = Integer.parseInt(postingString.substring(dfSym + 1,
					postingString.indexOf(Index.TF_SYM)));

			sum += df;
			int diff = docId - offset; // delta against the previous offset
			// Advance the delta base to this posting's trailing offset field.
			offset = Integer.parseInt(postingString.substring(postingString
					.lastIndexOf(Index.DF_SYM) + 1));

			if (isFirstRecord) {
				// First posting of a run: store docId..tf verbatim (absolute doc id).
				resultBuffer.append(postingString.substring(dfSym + 1,
						postingString.lastIndexOf(Index.DF_SYM)));
				isFirstRecord = false;
			} else if (postingString.length() > Index.REDUCE_MAX_SIZE) {
				// Oversized single posting: flush the buffered run first. Its df must not
				// include the current posting, hence sum - df.
				resultText.set((sum - df) + Index.DF_SYM
						+ resultBuffer.toString());
				keyString = key.toString();
				output.collect(new Text(keyString), resultText); // flush to disk

				Index.printToErr("Output buffer posting: [ " + keyString
						+ "\t" + resultText.toString() + " ]");

				// Then flush the oversized posting on its own, minus its trailing
				// offset field.
				postingString = postingString.substring(0,
						postingString.lastIndexOf(Index.DF_SYM));
				resultText.set(postingString);
				output.collect(new Text(keyString), resultText); // flush to disk

				Index.printToErr("Output big single posting: [ " + keyString
						+ "\t" + resultText.toString() + " ]");

				// Start a fresh run; the next posting is stored verbatim again.
				sum = 0;
				offset = 0;
				resultBuffer = new StringBuilder();
				isFirstRecord = true;
			} else {
				// Append the posting delta-encoded: SEPERATOR + diff + TF_SYM + tf-part.
				Index.printToErr("Appending posting: [ " + postingString
						+ " ] to buffer: [ " + resultBuffer.toString()
						+ " ]");

				resultBuffer.append(Index.SEPERATOR
						+ diff
						+ Index.TF_SYM
						+ postingString.substring(
								postingString.indexOf(Index.TF_SYM) + 1,
								postingString.lastIndexOf(Index.DF_SYM)));
			}

			// Flush the buffered run once it grows past the predefined maximum size.
			if (resultBuffer.length() > Index.REDUCE_MAX_SIZE) {
				resultText.set(sum + Index.DF_SYM + resultBuffer.toString());
				keyString = key.toString();

				output.collect(new Text(keyString), resultText); // flush to disk
				Index.printToErr("Output big buffer: [ " + keyString + "\t"
						+ resultText.toString() + " ]");

				// Reset for the next run.
				sum = 0;
				offset = 0;
				resultBuffer = new StringBuilder();
				isFirstRecord = true;
			}
		}

		// All values consumed (the original !values.hasNext() guard was always true
		// here); flush whatever remains in the buffer.
		if (resultBuffer.length() > 0) {
			keyString = key.toString();
			resultText.set(sum + Index.DF_SYM + resultBuffer.toString());
			output.collect(new Text(keyString), resultText);
			Index.printToErr("Output posting, last one: [ " + keyString + "\t"
					+ resultText.toString() + " ]");
		}
		Index.printToErr("----------------------------------------------------");
	}

	/**
	 * Records the posting's doc id and reports whether it is new for the current term.
	 *
	 * @param postingString posting whose doc id lies between the first
	 *                      {@code DF_SYM} and the first {@code TF_SYM}
	 * @return {@code true} if the doc id has not been seen yet for this term
	 */
	private boolean isUnique(String postingString) {
		String docId = postingString.substring(
				postingString.indexOf(Index.DF_SYM) + 1,
				postingString.indexOf(Index.TF_SYM));
		// HashSet.add returns false when the element was already present —
		// one lookup instead of the original contains()+add() pair.
		return docIdSet.add(docId);
	}

	/*
	 * (non-Javadoc)
	 *
	 * @see org.apache.hadoop.mapred.MapReduceBase#close()
	 */
	@Override
	public void close() throws IOException {
		// Explicit System.gc() removed: it is only a hint, stalls task teardown,
		// and the JVM reclaims this task's memory on exit anyway.
		super.close();
	}
}
