package index;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Locale;
import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.swing.text.Document;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

import util.HtmlContentExtractor;
import util.Postings;

/**
 * Hadoop mapper that emits one inverted-index posting per distinct term in a
 * TREC-style document (a {@code <dochdr>…</dochdr>} header followed by HTML).
 *
 * <p>Input key: the full document text (the value is unused). Output key: a
 * term; output value: {@code "1;docId:gap1,gap2,…,tf;docId"} — a document
 * frequency of 1 (single doc per call), the doc id, the gap-encoded character
 * positions of the term within the extracted text, the term frequency, and
 * the doc id again.
 *
 * <p>Not thread-safe: mutable per-document state ({@link #termSet},
 * {@link #buf}) is held in fields, which is fine under Hadoop's
 * one-mapper-instance-per-task model.
 *
 * @author Guimin Lin
 * @date Feb 17, 2011
 */
public class IndexMapper extends MapReduceBase implements
		Mapper<Text, Text, Text, Text> {

	/* Reusable output objects — standard Hadoop idiom to avoid per-record
	 * allocation. */
	private Text wordText = new Text();
	private Text valueText = new Text();

	/** Compiled once; doc-id extraction runs for every input record. */
	private static final Pattern DOC_ID_PATTERN = Pattern
			.compile("<\\s*docid\\s*>(\\d+)<\\s*/\\s*docid\\s*>");

	// Retained (unused here) for source compatibility with package-local code
	// that may reference these fields.
	final String docTagPattern = "<\\s*/\\s*dochdr\\s*>";// start tag of html
	final String htmlTagPattern = "<\\s*html\\s*/?\\s*>";// end tag of xml:
															// dochdr, also the
															// start position of
															// html
	final String docIdPattern = "<\\s*docid\\s*>(\\d+)<\\s*/\\s*docid\\s*>";
	/** End of the header block; the HTML body starts right after this tag. */
	final String docStartTag = "</dochdr>";
	final String docEndTag = "</doc>";

	/** Terms already emitted for the current document (cleared per call). */
	HashSet<String> termSet;
	/** Words excluded from indexing. */
	HashSet<String> stopWordSet;
	/** Optional whitelist (currently disabled in {@link #map}). */
	HashSet<String> coreWordSet;
	/** Scratch buffer for building one posting string. */
	StringBuffer buf;

	/**
	 * Initializes the stop-word and core-word sets.
	 */
	public IndexMapper() {
		termSet = new HashSet<String>();
		stopWordSet = new HashSet<String>();
		coreWordSet = new HashSet<String>();

		coreWordSet.addAll(Arrays.asList("information", "age", "largescale",
				"web", "retrieval"));

		stopWordSet.addAll(Arrays.asList("a", "an", "and", "are", "as", "at",
				"by", "be", "c", "data", "e", "for", "from", "i", "if", "in",
				"is", "it", "not", "of", "on", "or", "k", "so", "s", "that",
				"the", "this", "to", "with"));
	}

	/**
	 * Parses one document, extracts its id and HTML body, and emits a posting
	 * ({@code term -> "1;docId:gaps,tf;docId"}) for every distinct
	 * non-stop-word term. Records without a doc id or header tag, or whose
	 * HTML fails to parse, are silently skipped.
	 *
	 * @param key      the full document text
	 * @param value    unused
	 * @param output   collector receiving (term, posting) pairs
	 * @param reporter unused
	 * @throws IOException if the collector fails
	 */
	@Override
	public void map(Text key, Text value, OutputCollector<Text, Text> output,
			Reporter reporter) throws IOException {
		termSet.clear();

		// BUG FIX: the original guard used (trim() != ""), which compares
		// object identity and is always true; compare content instead.
		if (key == null || key.toString().trim().isEmpty()) {
			return;
		}
		// Locale.ROOT keeps case folding locale-independent (e.g. Turkish I).
		String contentString = key.toString().toLowerCase(Locale.ROOT);

		/* get doc id; skip records that have none */
		Matcher matcher = DOC_ID_PATTERN.matcher(contentString);
		if (!matcher.find()) {
			return;
		}
		int docId = Integer.parseInt(matcher.group(1));

		/* get html body: everything after the closing header tag */
		int tagIndex = contentString.indexOf(docStartTag);
		if (tagIndex == -1) {
			return;
		}
		String htmlContent = contentString.substring(tagIndex
				+ docStartTag.length());

		// Explicit charset: bare getBytes() uses the platform default and can
		// corrupt non-ASCII input on differently-configured nodes.
		InputStream inputStream = new ByteArrayInputStream(
				htmlContent.getBytes(StandardCharsets.UTF_8));
		HtmlContentExtractor parser = new HtmlContentExtractor();
		if (!parser.parseDoc(inputStream)) {
			return;// skip documents that fail to parse
		}

		// Keep only lowercase letters; every other run of characters becomes
		// a single space, so tokens in 'text' are space-delimited.
		String text = parser.getDocBuffer().toString().replaceAll("'s", "")
				.replaceAll("[^a-z]+", " ");

		StringTokenizer tokenizer = new StringTokenizer(text);
		while (tokenizer.hasMoreTokens()) {
			String word = tokenizer.nextToken();

			/* bypass stop words and terms already emitted for this doc
			 * (add() returns false when the term was already present) */
			if (stopWordSet.contains(word) || !termSet.add(word)) {
				continue;
			}

//			 if (!coreWordSet.contains(word)) {
//			 continue;
//			 }

			/* generate posting list for this term */
			buf = new StringBuffer(128);
			buf.append("1;").append(docId).append(':');// df and doc id

			/*
			 * Append the gap-encoded character position of every whole-word
			 * occurrence of the term.
			 * BUG FIX: the original used indexOf() alone, which also matched
			 * the term inside longer words (e.g. "he" inside "the"),
			 * inflating term frequency and corrupting positions.
			 */
			int termFreq = 0;
			int preIndex = 0;
			int index = indexOfWord(text, word, 0);
			while (index != -1) {
				termFreq++;
				buf.append(index - preIndex).append(',');
				preIndex = index;
				index = indexOfWord(text, word, index + word.length());
			}
			buf.append(termFreq).append(';').append(docId);

			wordText.set(word);
			valueText.set(buf.toString());
			output.collect(wordText, valueText);

			// Warn on unusually large postings; the explicit System.gc() the
			// original called here was removed — forcing GC per posting
			// stalls the mapper and the JVM reclaims locals on its own.
			if (buf.length() > Index.COMBINER_MAX_SIZE * 3) {
				System.err.println("Buffer size: " + buf.length());
			}
		}
	}

	/**
	 * Returns the index of the next whole-word occurrence of {@code word} in
	 * {@code text} at or after {@code from}, or -1 if none. Assumes
	 * {@code text} is normalized so tokens are separated by single spaces.
	 */
	private static int indexOfWord(String text, String word, int from) {
		int i = text.indexOf(word, from);
		while (i != -1) {
			boolean startOk = i == 0 || text.charAt(i - 1) == ' ';
			int end = i + word.length();
			boolean endOk = end == text.length() || text.charAt(end) == ' ';
			if (startOk && endOk) {
				return i;
			}
			i = text.indexOf(word, i + 1);
		}
		return -1;
	}

}
