package mapreduce;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

import util.HtmlContentExtractor;

/**
 * Hadoop mapper (old {@code mapred} API) that emits {@code (word, 1)} pairs for every
 * English word found in the HTML portion of an input document.
 *
 * <p>Each input record is expected to be a document whose content arrives in {@code key}
 * (the {@code value} parameter is unused by this mapper — NOTE(review): confirm against
 * the configured InputFormat). The document may carry a {@code <DOCHDR>...</DOCHDR>}
 * header; everything from the {@code <html>} tag (or, failing that, everything after
 * {@code </DOCHDR>}) is treated as the HTML body, parsed with
 * {@link util.HtmlContentExtractor}, reduced to lowercase ASCII words, and emitted.
 *
 * @author Guimin Lin
 * @date Feb 1, 2011
 */
public class WordMapper extends MapReduceBase implements Mapper<Text, Text, Text, LongWritable> {
	/** Reused output value: every occurrence of a word counts once. */
	private final static LongWritable one = new LongWritable(1);
	/** Reused output key, to avoid allocating a new Text per token. */
	private Text word = new Text();

	/** Closing tag of the DOCHDR header ("&lt;/dochdr&gt;"); the HTML body follows it. */
	private static final Pattern DOCHDR_END_TAG = Pattern.compile("<\\s*/\\s*dochdr\\s*>");
	/** Opening &lt;html&gt; tag; everything from this tag onward is the HTML body. */
	private static final Pattern HTML_START_TAG = Pattern.compile("<\\s*html\\s*/?\\s*>");

	/**
	 * Extracts words from the HTML body of the document held in {@code key} and
	 * collects one {@code (word, 1)} pair per occurrence.
	 *
	 * @param key      document content (header + HTML); skipped when null or blank
	 * @param value    unused
	 * @param output   collector receiving {@code (word, 1)} pairs
	 * @param reporter unused
	 * @throws IOException if the collector fails
	 */
	@Override
	public void map(Text key, Text value, OutputCollector<Text, LongWritable> output, Reporter reporter) throws IOException {
		// BUG FIX: original guard was `key.toString().trim() != ""` — a reference
		// comparison that is always true, so blank documents were never skipped.
		if (key == null || key.toString().trim().isEmpty()) {
			return;
		}
		String contentString = key.toString().toLowerCase();

		String htmlContent = extractHtmlBody(contentString);
		if (htmlContent == null) {
			return; // neither an <html> tag nor a </DOCHDR> tag: nothing to parse
		}

		// Pin UTF-8 rather than relying on the platform default charset.
		InputStream inputStream = new ByteArrayInputStream(
				htmlContent.getBytes(StandardCharsets.UTF_8));
		HtmlContentExtractor parser = new HtmlContentExtractor();
		if (parser.parseDoc(inputStream)) { // only emit when the document parsed successfully
			// Drop possessive "'s", then keep only runs of lowercase ASCII letters.
			String text = parser.getDocBuffer().toString()
					.replaceAll("'s", "").replaceAll("[^a-z]+", " ");

			// Tokenize on whitespace and emit each word once per occurrence.
			StringTokenizer tokenizer = new StringTokenizer(text);
			while (tokenizer.hasMoreTokens()) {
				word.set(tokenizer.nextToken());
				output.collect(word, one);
			}
		}
	}

	/**
	 * Locates the HTML body inside a (lowercased) document string.
	 *
	 * <p>Prefers the first {@code <html>} tag (the tag itself is kept, so the
	 * remainder is a complete HTML document); otherwise falls back to everything
	 * after the first {@code </dochdr>} tag.
	 *
	 * @param contentString lowercased document content
	 * @return the HTML body, or {@code null} when neither tag is present
	 */
	private static String extractHtmlBody(String contentString) {
		Matcher matcher = HTML_START_TAG.matcher(contentString);
		if (matcher.find()) {
			// matcher.start() replaces the original redundant indexOf(matcher.group()).
			return contentString.substring(matcher.start());
		}
		matcher = DOCHDR_END_TAG.matcher(contentString);
		if (matcher.find()) {
			// Skip past the header's closing tag; the rest should be the HTML body.
			return contentString.substring(matcher.end());
		}
		return null;
	}
}
