package index;

import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.Reducer;

/**
 * @author Guimin Lin
 * @date Feb 17, 2011
 */
/**
 * Combiner that merges per-document posting fragments for one term into larger
 * gap-encoded fragments, flushing to the output whenever an accumulated
 * fragment would exceed {@code Index.COMBINER_MAX_SIZE}.
 *
 * <p>Each incoming value is assumed (inferred from the parsing below — confirm
 * against the emitting mapper) to have the form:
 * {@code df DF_SYM firstDocId TF_SYM tf [SEPERATOR gap TF_SYM tf ...] DF_SYM lastDocId}
 * where the trailing {@code lastDocId} is the absolute id of the fragment's
 * last document, used as the base for gap-encoding the next fragment.
 *
 * <p>Not thread-safe: instance state ({@code docIdSet}, {@code resultText}) is
 * reused across calls, which matches Hadoop's single-threaded task model.
 *
 * @author Guimin Lin
 * @date Feb 17, 2011
 */
public class IndexCombiner extends MapReduceBase implements
		Reducer<Text, Text, Text, Text> {

	/** Doc ids already seen for the current key; cleared at the top of every reduce() call. */
	HashSet<String> docIdSet = new HashSet<String>();
	/** Reused output value to avoid a per-record allocation (standard Hadoop idiom). */
	Text resultText = new Text();

	/**
	 * Merges all unique posting fragments for {@code key} into as few
	 * gap-encoded fragments as possible, each at most roughly
	 * {@code Index.COMBINER_MAX_SIZE} characters.
	 *
	 * @param key      the index term
	 * @param values   posting fragments emitted by the mapper (format above)
	 * @param output   collector receiving the merged fragments
	 * @param reporter unused
	 * @throws IOException if the collector fails
	 */
	@Override
	public void reduce(Text key, Iterator<Text> values,
			OutputCollector<Text, Text> output, Reporter reporter)
			throws IOException {
		docIdSet.clear();
		int sum = 0;    // total document frequency of the fragment in resultBuffer
		int offset = 0; // absolute last doc id of the fragment in resultBuffer
		boolean isFirstRecord = true;
		// StringBuilder: this method is single-threaded, StringBuffer's locking is waste.
		StringBuilder resultBuffer = new StringBuilder(512);
		System.err.println("Processing: " + key.toString());

		while (values.hasNext()) {
			String postingString = values.next().toString();
			if (!isUnique(postingString)) {
				continue; // drop duplicate postings for the same doc id
			}

			int df = Integer.parseInt(postingString.substring(0,
					postingString.indexOf(Index.DF_SYM)));
			int docId = Integer.parseInt(postingString.substring(
					postingString.indexOf(Index.DF_SYM) + 1,
					postingString.indexOf(Index.TF_SYM)));
			// Absolute last doc id carried at the tail of this posting.
			int postingOffset = Integer.parseInt(postingString
					.substring(postingString.lastIndexOf(Index.DF_SYM) + 1));

			if (isFirstRecord) {
				// Start a fresh fragment: copy the doc/tf list verbatim
				// (everything between the df prefix and the trailing offset).
				resultBuffer.append(postingString.substring(
						postingString.indexOf(Index.DF_SYM) + 1,
						postingString.lastIndexOf(Index.DF_SYM)));
				sum = df;
				offset = postingOffset;
				isFirstRecord = false;
			} else if (postingString.length() > Index.COMBINER_MAX_SIZE) {
				// Incoming posting is itself oversized: flush the fragment
				// accumulated so far, then pass the long posting through as-is.
				// BUGFIX: the flushed fragment must carry ITS OWN df total and
				// trailing offset. The original code had already added this
				// posting's df to `sum` and overwritten `offset` with this
				// posting's tail before flushing, corrupting the emitted record.
				resultText.set(sum + Index.DF_SYM + resultBuffer.toString()
						+ Index.DF_SYM + offset);
				output.collect(key, resultText);

				resultText.set(postingString);
				output.collect(key, resultText);

				// Reset accumulator state; next unique posting starts a fragment.
				sum = 0;
				offset = 0;
				resultBuffer = new StringBuilder(512);
				isFirstRecord = true;
			} else {
				// Gap-encode this posting's first doc id against the previous
				// fragment end, then append the remainder of its doc/tf list.
				int diff = docId - offset;
				resultBuffer.append(Index.SEPERATOR + diff + Index.TF_SYM
						+ postingString.substring(
								postingString.indexOf(Index.TF_SYM) + 1,
								postingString.lastIndexOf(Index.DF_SYM)));
				sum += df;
				offset = postingOffset;
			}

			// Flush whenever the accumulated fragment has grown past the limit.
			if (resultBuffer.length() > Index.COMBINER_MAX_SIZE) {
				resultText.set(sum + Index.DF_SYM + resultBuffer.toString()
						+ Index.DF_SYM + offset);
				output.collect(key, resultText);

				sum = 0;
				offset = 0;
				resultBuffer = new StringBuilder(512);
				isFirstRecord = true;
			}
		}

		// Flush whatever remains after the last value.
		if (!isFirstRecord && resultBuffer.length() > 0) {
			resultText.set(sum + Index.DF_SYM + resultBuffer.toString()
					+ Index.DF_SYM + offset);
			output.collect(key, resultText);
		}

		System.err.println("----------------------------------------------------");
	}

	/**
	 * Returns {@code true} the first time a posting's doc id is seen for the
	 * current key, recording it; {@code false} (with a diagnostic) on repeats.
	 *
	 * <p>Note: the original version also logged two stderr lines for every
	 * accepted posting, which dominates combiner runtime; only the duplicate
	 * warning is kept.
	 *
	 * @param postingString posting fragment in the format documented on the class
	 */
	private boolean isUnique(String postingString) {
		String docID = postingString.substring(
				postingString.indexOf(Index.DF_SYM) + 1,
				postingString.indexOf(Index.TF_SYM));
		if (!docIdSet.add(docID)) {
			System.err.println("Combiner: " + this.toString() + ": " + docID
					+ " is already in the set!");
			System.err.println("Posting: " + postingString);
			return false;
		}
		return true;
	}

	/* (non-Javadoc)
	 * @see org.apache.hadoop.mapred.MapReduceBase#close()
	 */
	@Override
	public void close() throws IOException {
		// Removed the explicit System.gc(): forcing a full GC per task is
		// discouraged and only adds pause time; the JVM manages collection.
		super.close();
	}

}
